kthreads: simplify migration_thread() exit path
Now that kthread_stop() can be used even if the task has already exited,
we can kill the "wait_to_die:" loop in migration_thread(). But we must
pin rq->migration_thread after creation.

Actually, I don't think CPU_UP_CANCELED or CPU_DEAD should wait for
->migration_thread to exit. Perhaps we can simplify this code a bit more:
migration_call() could set ->should_stop and forget about this thread.
But we would need a new helper in kthread.c for that.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Vitaliy Gusev <vgusev@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 63706172f3
commit 371cbb387e
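As the changelog notes, the parent change (63706172f3) makes kthread_stop() safe to call on a kthread that has already exited, so migration_thread() no longer needs to park in a "wait_to_die" loop; it can simply break out of its main loop and return. A minimal, self-contained sketch of that shape (example_thread() and resource_is_gone() are illustrative stand-ins, not the sched.c code):

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical stand-in for "this thread's CPU went offline". */
static bool resource_is_gone(void *data)
{
	return false;
}

/* Illustrative main loop: exit directly instead of jumping to wait_to_die. */
static int example_thread(void *data)
{
	while (!kthread_should_stop()) {
		if (resource_is_gone(data))
			break;		/* just return; kthread_stop() copes with it */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

The catch, addressed in the migration_call() hunks below, is that the creator must then hold its own reference on the task_struct, since the thread may already be gone by the time kthread_stop() is called.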
kernel/sched.c

@@ -7045,7 +7045,7 @@ static int migration_thread(void *data)
 
 		if (cpu_is_offline(cpu)) {
 			spin_unlock_irq(&rq->lock);
-			goto wait_to_die;
+			break;
 		}
 
 		if (rq->active_balance) {
@@ -7071,16 +7071,7 @@ static int migration_thread(void *data)
 		complete(&req->done);
 	}
 	__set_current_state(TASK_RUNNING);
-	return 0;
-
-wait_to_die:
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
+
 	return 0;
 }
@@ -7494,6 +7485,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = task_rq_lock(p, &flags);
 		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 		task_rq_unlock(rq, &flags);
+		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
 		break;
@@ -7524,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     cpumask_any(cpu_online_mask));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
+		put_task_struct(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;
@@ -7533,6 +7526,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
+		put_task_struct(rq->migration_thread);
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		spin_lock_irq(&rq->lock);
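The get_task_struct()/put_task_struct() pairs added to migration_call() above implement the "pin after creation" rule from the changelog: without the extra reference, a thread that exits on its own could have its task_struct freed before kthread_stop() runs. A minimal sketch of that creator-side pattern, under the same assumption that kthread_stop() tolerates an already-exited thread (the demo_* names are illustrative, not kernel APIs):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
	/* May return at any time; the creator holds its own reference. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	get_task_struct(demo_task);	/* pin: analogous to CPU_UP_PREPARE above */
	return 0;
}

static void demo_stop(void)
{
	kthread_stop(demo_task);	/* OK even if demo_thread() already returned */
	put_task_struct(demo_task);	/* analogous to CPU_UP_CANCELED / CPU_DEAD */
	demo_task = NULL;
}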