sched/fair: Clean up asym packing
Clean up asym packing to follow the default load balance behavior:

 - classify the group by creating a group_asym_packing field.
 - calculate the imbalance in calculate_imbalance() instead of
   bypassing it.

We no longer need to test the same conditions twice to detect asym
packing, and the imbalance calculation is consolidated in
calculate_imbalance().

There are no functional changes.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Acked-by: Rik van Riel <riel@surriel.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: srikar@linux.vnet.ibm.com
Cc: valentin.schneider@arm.com
Link: https://lkml.kernel.org/r/1571405198-27570-2-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9ae7ab20b4
commit 490ba971d8
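The pattern the patch moves to (classify once while gathering group stats, then consume the classification where the imbalance is computed) can be modelled outside the kernel in a few lines. The following standalone C sketch is illustrative only: asym_prefer(), classify(), struct sg_stats and the priority/load values are made-up stand-ins, not kernel code.

#include <stdio.h>

/* Illustrative stand-in for struct sg_lb_stats. */
struct sg_stats {
	unsigned int group_asym_packing;	/* set during classification */
	unsigned long group_load;
};

/* Stand-in for sched_asym_prefer(): higher priority is preferred. */
static int asym_prefer(int prio_a, int prio_b)
{
	return prio_a > prio_b;
}

/*
 * Classification pass, mirroring update_sd_pick_busiest(): mark the
 * group once instead of re-deriving the same condition later.
 */
static void classify(struct sg_stats *sgs, unsigned long load,
		     int dst_prio, int group_pref_prio)
{
	sgs->group_load = load;
	if (load && asym_prefer(dst_prio, group_pref_prio))
		sgs->group_asym_packing = 1;
}

/*
 * Imbalance pass, mirroring calculate_imbalance(): the flag
 * short-circuits to "pull the busiest group's whole load".
 */
static unsigned long imbalance(const struct sg_stats *busiest)
{
	if (busiest->group_asym_packing)
		return busiest->group_load;
	return 0;	/* the other balancing paths are elided here */
}

int main(void)
{
	struct sg_stats sgs = { 0, 0 };

	/* Destination priority 2 beats the group's preferred priority 1. */
	classify(&sgs, 1024UL, 2, 1);
	printf("asym=%u imbalance=%lu\n",
	       sgs.group_asym_packing, imbalance(&sgs));
	return 0;
}

Run as-is this prints "asym=1 imbalance=1024": the higher-priority destination pulls the whole group load, which is exactly the short-circuit the patch relocates into calculate_imbalance().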
@@ -7665,6 +7665,7 @@ struct sg_lb_stats {
 	unsigned int group_weight;
 	enum group_type group_type;
 	int group_no_capacity;
+	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
 	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
 	unsigned int nr_numa_running;
@@ -8119,9 +8120,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * ASYM_PACKING needs to move all the work to the highest
 	 * prority CPUs in the group, therefore mark all groups
 	 * of lower priority than ourself as busy.
+	 *
+	 * This is primarily intended to used at the sibling level.  Some
+	 * cores like POWER7 prefer to use lower numbered SMT threads.  In the
+	 * case of POWER7, it can move to lower SMT modes only when higher
+	 * threads are idle.  When in lower SMT modes, the threads will
+	 * perform better since they share less core resources.  Hence when we
+	 * have idle threads, we want them to be the higher ones.
 	 */
 	if (sgs->sum_nr_running &&
 	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
+		sgs->group_asym_packing = 1;
 		if (!sds->busiest)
 			return true;
 
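Both the test kept above and the helper removed below lean on sched_asym_prefer(). For reference, in kernels of this era it is a one-line comparison of arch-defined CPU priorities defined in kernel/sched/sched.h; quoted here from memory as a sketch, so verify against the tree:

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}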
@@ -8262,51 +8271,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	}
 }
 
-/**
- * check_asym_packing - Check to see if the group is packed into the
- *			sched domain.
- *
- * This is primarily intended to used at the sibling level.  Some
- * cores like POWER7 prefer to use lower numbered SMT threads.  In the
- * case of POWER7, it can move to lower SMT modes only when higher
- * threads are idle.  When in lower SMT modes, the threads will
- * perform better since they share less core resources.  Hence when we
- * have idle threads, we want them to be the higher ones.
- *
- * This packing function is run on idle threads.  It checks to see if
- * the busiest CPU in this domain (core in the P7 case) has a higher
- * CPU number than the packing function is being run on.  Here we are
- * assuming lower CPU number will be equivalent to lower a SMT thread
- * number.
- *
- * Return: 1 when packing is required and a task should be moved to
- * this CPU.  The amount of the imbalance is returned in env->imbalance.
- *
- * @env: The load balancing environment.
- * @sds: Statistics of the sched_domain which is to be packed
- */
-static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	int busiest_cpu;
-
-	if (!(env->sd->flags & SD_ASYM_PACKING))
-		return 0;
-
-	if (env->idle == CPU_NOT_IDLE)
-		return 0;
-
-	if (!sds->busiest)
-		return 0;
-
-	busiest_cpu = sds->busiest->asym_prefer_cpu;
-	if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
-		return 0;
-
-	env->imbalance = sds->busiest_stat.group_load;
-
-	return 1;
-}
-
 /**
  * fix_small_imbalance - Calculate the minor imbalance that exists
  *			 amongst the groups of a sched_domain, during
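The deleted helper's early returns are not lost. In fair.c of this era, update_sd_pick_busiest() already guards its asym-packing path with the same SD_ASYM_PACKING and CPU_NOT_IDLE tests just before the condition shown in the second hunk, which is why setting a flag there can replace the helper wholesale. Roughly (paraphrased surrounding context, not part of this diff):

asym_packing:
	/* This is the busiest node in its class. */
	if (!(env->sd->flags & SD_ASYM_PACKING))
		return true;

	/* No ASYM_PACKING if target CPU is already busy */
	if (env->idle == CPU_NOT_IDLE)
		return true;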
@@ -8391,6 +8355,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
 
+	if (busiest->group_asym_packing) {
+		env->imbalance = busiest->group_load;
+		return;
+	}
+
 	if (busiest->group_type == group_imbalanced) {
 		/*
 		 * In the group_imb case we cannot rely on group-wide averages
@@ -8495,8 +8464,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	busiest = &sds.busiest_stat;
 
 	/* ASYM feature bypasses nice load balance check */
-	if (check_asym_packing(env, &sds))
-		return sds.busiest;
+	if (busiest->group_asym_packing)
+		goto force_balance;
 
 	/* There is no busy sibling group to pull tasks from */
 	if (!sds.busiest || busiest->sum_nr_running == 0)