sched: avoid stack var in move_task_off_dead_cpu, fix

Impact: locking fix

We can't call cpuset_cpus_allowed_locked() with the rq lock held.
However, the rq lock merely protects us from (1) cpu_online_mask changing
and (2) someone else changing p->cpus_allowed.

The first can't happen because we're being called from a cpu hotplug
notifier. The second doesn't really matter: we are forcing the task off
a CPU it was affine to, so we're not doing very well anyway.

So we remove the rq lock from this path, and all is good.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 3d8cbdf865
commit 1224e376f2
@@ -6126,8 +6126,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
  */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
-	unsigned long flags;
-	struct rq *rq;
 	int dest_cpu;
 	/* FIXME: Use cpumask_of_node here. */
 	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
@@ -6146,10 +6144,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
-		rq = task_rq_lock(p, &flags);
 		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
 		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
-		task_rq_unlock(rq, &flags);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
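To make the locking argument easier to follow outside the diff, here is a minimal sketch of how the fallback path of move_task_off_dead_cpu() reads once the rq lock round-trip is gone. The node-local and "any allowed online CPU" selection steps are elided and only summarized in comments, so this is an illustration of the reasoning in the commit message, not the complete function from kernel/sched.c.

/*
 * Sketch of move_task_off_dead_cpu() after this patch (elided parts
 * are marked with "..." comments).
 */
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
	int dest_cpu;
	/* FIXME: Use cpumask_of_node here. */
	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));

	/* ... try online CPUs in p->cpus_allowed, preferring the same
	 * node as dead_cpu; on success dest_cpu < nr_cpu_ids ... */

	/* No more Mr. Nice Guy. */
	if (dest_cpu >= nr_cpu_ids) {
		/*
		 * No rq lock here any more: cpu_online_mask cannot change
		 * under us (we run from a cpu hotplug notifier), and a
		 * racing update of p->cpus_allowed is tolerable because we
		 * are evicting the task from a CPU it was affine to anyway.
		 */
		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);

		/* ... warn (except for exiting tasks and kthreads) ... */
	}

	/* ... migrate p to dest_cpu ... */
}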