sched/topology: Introduce the 'sched_energy_present' static key

In order to make sure Energy Aware Scheduling (EAS) will not impact
systems where no Energy Model is available, introduce a static key
guarding access to the EAS code paths. Since EAS is enabled on a
per-root-domain basis, the static key is enabled when at least one root
domain meets all the conditions for EAS.
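
For context, a consumer of the key is expected to look roughly like the
sketch below; the function and helper names are hypothetical and nothing
in the sketch is introduced by this patch. static_branch_unlikely() on a
DEFINE_STATIC_KEY_FALSE() key compiles to a plain fall-through while the
key is disabled, so the check is essentially free on systems without an
Energy Model:

  /* Hypothetical consumer sketch -- not part of this patch. */
  static int select_cpu_sketch(struct task_struct *p, int prev_cpu)
  {
  	/* Branch is compiled out while sched_energy_present is disabled. */
  	if (static_branch_unlikely(&sched_energy_present))
  		return find_energy_efficient_cpu_sketch(p, prev_cpu);

  	/* Regular (non-EAS) wake-up placement. */
  	return prev_cpu;
  }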

Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adharmap@codeaurora.org
Cc: chris.redpath@arm.com
Cc: currojerez@riseup.net
Cc: dietmar.eggemann@arm.com
Cc: edubezval@gmail.com
Cc: gregkh@linuxfoundation.org
Cc: javi.merino@kernel.org
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: pkondeti@codeaurora.org
Cc: rjw@rjwysocki.net
Cc: skannan@codeaurora.org
Cc: smuckle@google.com
Cc: srinivas.pandruvada@linux.intel.com
Cc: thara.gopinath@linaro.org
Cc: tkjos@google.com
Cc: valentin.schneider@arm.com
Cc: vincent.guittot@linaro.org
Cc: viresh.kumar@linaro.org
Link: https://lkml.kernel.org/r/20181203095628.11858-10-quentin.perret@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/sched.h    |  4 ++++
 kernel/sched/topology.c | 28 ++++++++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2296,3 +2296,7 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
 #else
 #define perf_domain_span(pd) NULL
 #endif
+
+#ifdef CONFIG_SMP
+extern struct static_key_false sched_energy_present;
+#endif

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,6 +201,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 	return 1;
 }
 
+DEFINE_STATIC_KEY_FALSE(sched_energy_present);
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 DEFINE_MUTEX(sched_energy_mutex);
 bool sched_energy_update;
@@ -273,6 +274,19 @@ static void destroy_perf_domain_rcu(struct rcu_head *rp)
 	free_pd(pd);
 }
 
+static void sched_energy_set(bool has_eas)
+{
+	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
+		if (sched_debug())
+			pr_info("%s: stopping EAS\n", __func__);
+		static_branch_disable_cpuslocked(&sched_energy_present);
+	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
+		if (sched_debug())
+			pr_info("%s: starting EAS\n", __func__);
+		static_branch_enable_cpuslocked(&sched_energy_present);
+	}
+}
+
 /*
  * EAS can be used on a root domain if it meets all the following conditions:
  * 1. an Energy Model (EM) is available;
@@ -299,7 +313,7 @@ static void destroy_perf_domain_rcu(struct rcu_head *rp)
 #define EM_MAX_COMPLEXITY 2048
 
 extern struct cpufreq_governor schedutil_gov;
-static void build_perf_domains(const struct cpumask *cpu_map)
+static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
@@ -365,7 +379,7 @@ static void build_perf_domains(const struct cpumask *cpu_map)
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
 
-	return;
+	return !!pd;
 
 free:
 	free_pd(pd);
@@ -373,6 +387,8 @@ static void build_perf_domains(const struct cpumask *cpu_map)
 	rcu_assign_pointer(rd->pd, NULL);
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+	return false;
 }
 #else
 static void free_pd(struct perf_domain *pd) { }
@@ -2114,6 +2130,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
+	bool __maybe_unused has_eas = false;
 	int i, j, n;
 	int new_topology;
 
@@ -2177,14 +2194,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < n && !sched_energy_update; j++) {
 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
-			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
+			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
+				has_eas = true;
 				goto match3;
+			}
 		}
 		/* No match - add perf. domains for a new rd */
-		build_perf_domains(doms_new[i]);
+		has_eas |= build_perf_domains(doms_new[i]);
 match3:
 		;
 	}
+	sched_energy_set(has_eas);
 #endif
 
 	/* Remember the new sched domains: */