sched/fair: Use rq->nr_running when balancing load

CFS load_balance() only takes care of CFS tasks, whereas CPUs can also be used
by other scheduling classes. Typically, a CFS task preempted by an RT or
deadline task will not get a chance to be pulled by another CPU because
load_balance() doesn't take tasks from other classes into account.
Add the sum of nr_running to the group statistics and use it to detect such
situations.
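For illustration only (not part of the patch; the struct and numbers below are made up): a minimal user-space sketch of why counting only CFS tasks can hide a busy CPU. The CFS-only counter sees just the preempted CFS task, while the all-class counter also sees the RT task that preempted it.

#include <stdio.h>

/* Toy stand-ins for the two runqueue counters discussed above. */
struct rq_counts {
	unsigned int nr_running;	/* tasks of every class on this CPU */
	unsigned int cfs_h_nr_running;	/* CFS tasks only */
};

int main(void)
{
	/* One CFS task preempted by one RT task on a single-CPU group. */
	struct rq_counts rq = { .nr_running = 2, .cfs_h_nr_running = 1 };
	unsigned int group_weight = 1;

	/* CFS-only view: the group does not look overloaded. */
	printf("overloaded by h_nr_running: %d\n",
	       rq.cfs_h_nr_running > group_weight);
	/* All-classes view: the overload is visible and can trigger a pull. */
	printf("overloaded by nr_running:   %d\n",
	       rq.nr_running > group_weight);
	return 0;
}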

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: riel@surriel.com
Cc: srikar@linux.vnet.ibm.com
Cc: valentin.schneider@arm.com
Link: https://lkml.kernel.org/r/1571405198-27570-6-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Vincent Guittot 2019-10-18 15:26:32 +02:00 committed by Ingo Molnar
parent 0b0695f2b3
commit 5e23e47443


@@ -7694,6 +7694,7 @@ struct sg_lb_stats {
 	unsigned long group_load; /* Total load over the CPUs of the group */
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
+	unsigned int sum_nr_running; /* Nr of tasks running in the group */
 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 	unsigned int idle_cpus;
 	unsigned int group_weight;
@@ -7928,7 +7929,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 static inline bool
 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_h_nr_running < sgs->group_weight)
+	if (sgs->sum_nr_running < sgs->group_weight)
 		return true;
 
 	if ((sgs->group_capacity * 100) >
@@ -7949,7 +7950,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 static inline bool
 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_h_nr_running <= sgs->group_weight)
+	if (sgs->sum_nr_running <= sgs->group_weight)
 		return false;
 
 	if ((sgs->group_capacity * 100) <
@ -8053,6 +8054,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->sum_h_nr_running += rq->cfs.h_nr_running;
nr_running = rq->nr_running;
sgs->sum_nr_running += nr_running;
if (nr_running > 1)
*sg_status |= SG_OVERLOAD;
@@ -8410,13 +8413,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	}
 
 	if (busiest->group_weight == 1 || sds->prefer_sibling) {
-		unsigned int nr_diff = busiest->sum_h_nr_running;
+		unsigned int nr_diff = busiest->sum_nr_running;
 		/*
 		 * When prefer sibling, evenly spread running tasks on
 		 * groups.
 		 */
 		env->migration_type = migrate_task;
-		lsub_positive(&nr_diff, local->sum_h_nr_running);
+		lsub_positive(&nr_diff, local->sum_nr_running);
 		env->imbalance = nr_diff >> 1;
 		return;
 	}
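For illustration only (the numbers are made up, and lsub_positive() is re-implemented here as a plain function for a stand-alone build): in the prefer_sibling path above, the imbalance becomes half of the all-class task-count difference between the busiest and the local group.

#include <stdio.h>

/* Plain-function version of the subtract-but-clamp-at-zero helper used above. */
static void lsub_positive(unsigned int *lhs, unsigned int rhs)
{
	*lhs = (*lhs > rhs) ? *lhs - rhs : 0;
}

int main(void)
{
	unsigned int busiest_sum_nr_running = 5;	/* e.g. 4 CFS tasks + 1 RT task */
	unsigned int local_sum_nr_running = 1;
	unsigned int nr_diff = busiest_sum_nr_running;

	lsub_positive(&nr_diff, local_sum_nr_running);
	/* (5 - 1) >> 1 == 2: migrate two tasks toward the local group. */
	printf("imbalance = %u\n", nr_diff >> 1);
	return 0;
}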
@@ -8580,7 +8583,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	/* Try to move all excess tasks to child's sibling domain */
 	if (sds.prefer_sibling && local->group_type == group_has_spare &&
-	    busiest->sum_h_nr_running > local->sum_h_nr_running + 1)
+	    busiest->sum_nr_running > local->sum_nr_running + 1)
 		goto force_balance;
 
 	if (busiest->group_type != group_overloaded &&