forked from luck/tmp_suning_uos_patched
sched_rt: don't allocate cpumask in fastpath
Impact: cleanup. As pointed out by Steven Rostedt: since the arg in question is unused, we simply change cpupri_find() to accept NULL. Reported-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
a18b83b7ef
commit
13b8bd0a57
|
@ -55,7 +55,7 @@ static int convert_prio(int prio)
|
||||||
* cpupri_find - find the best (lowest-pri) CPU in the system
|
* cpupri_find - find the best (lowest-pri) CPU in the system
|
||||||
* @cp: The cpupri context
|
* @cp: The cpupri context
|
||||||
* @p: The task
|
* @p: The task
|
||||||
* @lowest_mask: A mask to fill in with selected CPUs
|
* @lowest_mask: A mask to fill in with selected CPUs (or NULL)
|
||||||
*
|
*
|
||||||
* Note: This function returns the recommended CPUs as calculated during the
|
* Note: This function returns the recommended CPUs as calculated during the
|
||||||
* current invocation. By the time the call returns, the CPUs may have in
|
* current invocation. By the time the call returns, the CPUs may have in
|
||||||
|
@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
|
||||||
if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
|
if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
|
if (lowest_mask)
|
||||||
|
cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
|
||||||
|
|
||||||
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
||||||
{
|
{
|
||||||
cpumask_var_t mask;
|
|
||||||
|
|
||||||
if (rq->curr->rt.nr_cpus_allowed == 1)
|
if (rq->curr->rt.nr_cpus_allowed == 1)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
|
if (p->rt.nr_cpus_allowed != 1
|
||||||
|
&& cpupri_find(&rq->rd->cpupri, p, NULL))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (p->rt.nr_cpus_allowed != 1
|
if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
|
||||||
&& cpupri_find(&rq->rd->cpupri, p, mask))
|
return;
|
||||||
goto free;
|
|
||||||
|
|
||||||
if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
|
|
||||||
goto free;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There appears to be other cpus that can accept
|
* There appears to be other cpus that can accept
|
||||||
|
@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
||||||
*/
|
*/
|
||||||
requeue_task_rt(rq, p, 1);
|
requeue_task_rt(rq, p, 1);
|
||||||
resched_task(rq->curr);
|
resched_task(rq->curr);
|
||||||
free:
|
|
||||||
free_cpumask_var(mask);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_SMP */
|
#endif /* CONFIG_SMP */
|
||||||
|
|
Loading…
Reference in New Issue
Block a user