kthreads: simplify migration_thread() exit path

Now that kthread_stop() can be used even if the task has already exited,
we can kill the "wait_to_die:" loop in migration_thread().  But we must
pin rq->migration_thread after creation.

Actually, I don't think CPU_UP_CANCELED or CPU_DEAD should wait for
->migration_thread exit.  Perhaps we can simplify this code a bit more.
migration_call() can set ->should_stop and forget about this thread.  But
we need a new helper in kthread.c for that.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Vitaliy Gusev <vgusev@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Oleg Nesterov 2009-06-17 16:27:45 -07:00 committed by Linus Torvalds
parent 63706172f3
commit 371cbb387e

View File

@ -7045,7 +7045,7 @@ static int migration_thread(void *data)
if (cpu_is_offline(cpu)) { if (cpu_is_offline(cpu)) {
spin_unlock_irq(&rq->lock); spin_unlock_irq(&rq->lock);
goto wait_to_die; break;
} }
if (rq->active_balance) { if (rq->active_balance) {
@ -7071,16 +7071,7 @@ static int migration_thread(void *data)
complete(&req->done); complete(&req->done);
} }
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
return 0;
wait_to_die:
/* Wait for kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
return 0; return 0;
} }
@ -7494,6 +7485,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
rq = task_rq_lock(p, &flags); rq = task_rq_lock(p, &flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
get_task_struct(p);
cpu_rq(cpu)->migration_thread = p; cpu_rq(cpu)->migration_thread = p;
break; break;
@ -7524,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_bind(cpu_rq(cpu)->migration_thread, kthread_bind(cpu_rq(cpu)->migration_thread,
cpumask_any(cpu_online_mask)); cpumask_any(cpu_online_mask));
kthread_stop(cpu_rq(cpu)->migration_thread); kthread_stop(cpu_rq(cpu)->migration_thread);
put_task_struct(cpu_rq(cpu)->migration_thread);
cpu_rq(cpu)->migration_thread = NULL; cpu_rq(cpu)->migration_thread = NULL;
break; break;
@ -7533,6 +7526,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_live_tasks(cpu); migrate_live_tasks(cpu);
rq = cpu_rq(cpu); rq = cpu_rq(cpu);
kthread_stop(rq->migration_thread); kthread_stop(rq->migration_thread);
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL; rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */ /* Idle task back to normal (off runqueue, low prio) */
spin_lock_irq(&rq->lock); spin_lock_irq(&rq->lock);