sched debug: check spread

debug feature: check how well we schedule within a reasonable
vruntime 'spread' range. (note that CPU overload can increase
the spread, so this is not a hard condition, but normal loads
should be within the spread.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
commit ddc9729750
parent d822ceceda
Author:    Peter Zijlstra, 2007-10-15 17:00:10 +02:00
Committed: Ingo Molnar

3 changed files, 22 insertions(+), 0 deletions(-)
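As a rough illustration of the test this patch adds (not part of the
patch itself): check_spread() flags an entity whose vruntime sits more
than three scheduling-latency periods away from the queue's
min_vruntime. The standalone userspace sketch below mirrors that test;
the 20 ms value for sysctl_sched_latency is an assumption based on the
default of that era (the real knob is /proc/sys/kernel/sched_latency_ns).

#include <stdio.h>

typedef long long s64;

static const s64 sysctl_sched_latency = 20000000LL;	/* assumed 20 ms, in ns */

/*
 * Same test as check_spread(): is |vruntime - min_vruntime| beyond
 * three latency periods?
 */
static int over_spread(s64 vruntime, s64 min_vruntime)
{
	s64 d = vruntime - min_vruntime;

	if (d < 0)
		d = -d;
	return d > 3 * sysctl_sched_latency;
}

int main(void)
{
	printf("%d\n", over_spread(150000000LL, 100000000LL)); /* 50 ms off -> 0 */
	printf("%d\n", over_spread(170000000LL, 100000000LL)); /* 70 ms off -> 1 */
	return 0;
}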

kernel/sched.c

@@ -250,6 +250,9 @@ struct cfs_rq {
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+
+	unsigned long nr_spread_over;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

kernel/sched_debug.c

@@ -140,6 +140,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
 			rq->bkl_cnt);
 #endif
+	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+			cfs_rq->nr_spread_over);
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
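With the hunk above, the counter shows up in the per-cfs_rq section of
the scheduler debug dump. A minimal userspace sketch for pulling those
lines out (assuming a CONFIG_SCHED_DEBUG kernel that exposes the dump
via /proc/sched_debug):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("fopen /proc/sched_debug");
		return 1;
	}
	/* Print only the nr_spread_over lines, one per cfs_rq. */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "nr_spread_over"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}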

kernel/sched_fair.c

@@ -447,6 +447,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+	if (d < 0)
+		d = -d;
+
+	if (d > 3*sysctl_sched_latency)
+		schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -494,6 +507,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
@@ -587,6 +601,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 	update_stats_curr_end(cfs_rq, prev);
 
+	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
@@ -996,6 +1011,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
+	check_spread(cfs_rq, curr);
 	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	resched_task(rq->curr);
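For reference, check_spread() is wired into the three points where an
entity's vruntime is worth sampling: enqueue_entity() (wakeup and
placement), put_prev_entity() (an entity leaving the CPU), and
task_new_fair() (both the new task and its parent). The counter itself
only advances when schedstat accounting is compiled in; schedstat_inc()
is roughly the following (paraphrased from the kernels of that era, not
part of this patch):

#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
#endif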