forked from luck/tmp_suning_uos_patched
[POWERPC] spufs: fix scheduler starvation by idle contexts
2.6.25 has a regression where we can starve the scheduler by creating (N_SPES+1) contexts, then running them one at a time. The final context will never be run, as the other contexts are loaded on the SPEs, none of which are reported as free (i.e., spu->alloc_state != SPU_FREE), so spu_get_idle() doesn't give us a spu to run on. Because all of the contexts are stopped, none are descheduled by the scheduler tick, as spusched_tick returns if spu_stopped(ctx). This change replaces the spu_stopped() check with checking for SCHED_IDLE in ctx->policy. We set a context's policy to SCHED_IDLE when we're not in spu_run(). We also favour SCHED_IDLE contexts when looking for contexts to unbind, but leave their timeslice intact for later resumption. This patch fixes the following test in the spufs-testsuite: tests/20-scheduler/02-yield-starvation Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
This commit is contained in:
parent
101142c37b
commit
4ef110141b
@ -234,6 +234,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
|
||||
*npc = ctx->ops->npc_read(ctx);
|
||||
|
||||
spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
|
||||
ctx->policy = SCHED_IDLE;
|
||||
spu_release(ctx);
|
||||
|
||||
if (signal_pending(current))
|
||||
|
@ -856,21 +856,18 @@ static noinline void spusched_tick(struct spu_context *ctx)
|
||||
{
|
||||
struct spu_context *new = NULL;
|
||||
struct spu *spu = NULL;
|
||||
u32 status;
|
||||
|
||||
if (spu_acquire(ctx))
|
||||
BUG(); /* a kernel thread never has signals pending */
|
||||
|
||||
if (ctx->state != SPU_STATE_RUNNABLE)
|
||||
goto out;
|
||||
if (spu_stopped(ctx, &status))
|
||||
goto out;
|
||||
if (ctx->flags & SPU_CREATE_NOSCHED)
|
||||
goto out;
|
||||
if (ctx->policy == SCHED_FIFO)
|
||||
goto out;
|
||||
|
||||
if (--ctx->time_slice)
|
||||
if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
|
||||
goto out;
|
||||
|
||||
spu = ctx->spu;
|
||||
@ -880,7 +877,8 @@ static noinline void spusched_tick(struct spu_context *ctx)
|
||||
new = grab_runnable_context(ctx->prio + 1, spu->node);
|
||||
if (new) {
|
||||
spu_unschedule(spu, ctx);
|
||||
spu_add_to_rq(ctx);
|
||||
if (ctx->policy != SCHED_IDLE)
|
||||
spu_add_to_rq(ctx);
|
||||
} else {
|
||||
spu_context_nospu_trace(spusched_tick__newslice, ctx);
|
||||
ctx->time_slice++;
|
||||
|
Loading…
Reference in New Issue
Block a user