Power management fixes for 5.10-rc4.
Make the intel_pstate driver behave as expected when it operates in the
passive mode with HWP enabled and the "powersave" governor on top of it.

Merge tag 'pm-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "Make the intel_pstate driver behave as expected when it operates in
  the passive mode with HWP enabled and the 'powersave' governor on top
  of it"

* tag 'pm-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: intel_pstate: Take CPUFREQ_GOV_STRICT_TARGET into account
  cpufreq: Add strict_target to struct cpufreq_policy
  cpufreq: Introduce CPUFREQ_GOV_STRICT_TARGET
  cpufreq: Introduce governor flags
commit fcfb67918c
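For readers skimming the diff below, a minimal, hypothetical sketch of the governor-side API after this series: the boolean dynamic_switching field is replaced by the new .flags word, and a governor that wants its requested frequency set exactly (as "performance" and "powersave" do) adds CPUFREQ_GOV_STRICT_TARGET, which cpufreq_init_governor() then latches into policy->strict_target for the driver to consult. The "example" governor name and its limits callback are invented for illustration; the struct fields and helpers are the existing cpufreq API.

/* Hypothetical governor, modeled on cpufreq_powersave.c, using .flags. */
#include <linux/cpufreq.h>
#include <linux/module.h>

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* Pin the frequency to the policy minimum, as "powersave" does. */
	__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}

static struct cpufreq_governor example_gov = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.flags	= CPUFREQ_GOV_STRICT_TARGET,	/* set the target exactly */
	.limits	= example_gov_limits,
};

static int __init example_gov_module_init(void)
{
	return cpufreq_register_governor(&example_gov);
}
module_init(example_gov_module_init);

MODULE_LICENSE("GPL");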
drivers/cpufreq/cpufreq.c

@@ -2254,7 +2254,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
 		return -EINVAL;
 
 	/* Platform doesn't want dynamic frequency switching ? */
-	if (policy->governor->dynamic_switching &&
+	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
 	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
 		struct cpufreq_governor *gov = cpufreq_fallback_governor();
 
@@ -2280,6 +2280,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
 		}
 	}
 
+	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
 	return 0;
 }
 
drivers/cpufreq/cpufreq_governor.h

@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
 #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)		\
 	{							\
 		.name = _name_,					\
-		.dynamic_switching = true,			\
+		.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,		\
 		.owner = THIS_MODULE,				\
 		.init = cpufreq_dbs_governor_init,		\
 		.exit = cpufreq_dbs_governor_exit,		\
drivers/cpufreq/cpufreq_performance.c

@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
 static struct cpufreq_governor cpufreq_gov_performance = {
 	.name		= "performance",
 	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_GOV_STRICT_TARGET,
 	.limits		= cpufreq_gov_performance_limits,
 };
 
drivers/cpufreq/cpufreq_powersave.c

@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
 	.name		= "powersave",
 	.limits		= cpufreq_gov_powersave_limits,
 	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_GOV_STRICT_TARGET,
 };
 
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
drivers/cpufreq/intel_pstate.c

@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 }
 
 static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool fast_switch)
+				     bool strict, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
 	 * field in it, so opportunistically update the max too if needed.
 	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
 
 	if (value == prev)
 		return;
@@ -2562,14 +2562,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
 			    pstate_funcs.get_val(cpu, target_pstate));
 }
 
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
-				       bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+				       int target_pstate, bool fast_switch)
 {
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
 	if (hwp_active) {
-		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
+		intel_cpufreq_adjust_hwp(cpu, target_pstate,
+					 policy->strict_target, fast_switch);
 		cpu->pstate.current_pstate = target_pstate;
 	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
@@ -2609,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 		break;
 	}
 
-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
 	freqs.new = target_pstate * cpu->pstate.scaling;
 
@@ -2628,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 
 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 
-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
 	return target_pstate * cpu->pstate.scaling;
 }
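To make the HWP change above easier to follow outside the MSR bit-packing: the requested p-state always becomes the floor of the hardware's allowed range, and with a strict-target governor the ceiling collapses to the same value, whereas otherwise it stays at the policy maximum so the hardware may still run faster. A hypothetical helper expressing that choice (the example_* names are invented; only policy->strict_target comes from this series):

/* Hypothetical helper illustrating the policy->strict_target semantics. */
#include <linux/cpufreq.h>

struct example_perf_window {
	unsigned int min_perf;
	unsigned int max_perf;
};

static struct example_perf_window
example_build_request(struct cpufreq_policy *policy, unsigned int target_perf,
		      unsigned int policy_max_perf)
{
	struct example_perf_window win = {
		/* The requested level is always the floor. */
		.min_perf = target_perf,
		/*
		 * "performance"/"powersave" (strict) pin the ceiling to the
		 * target as well; schedutil and the dbs governors leave the
		 * hardware free to run up to the policy maximum.
		 */
		.max_perf = policy->strict_target ? target_perf
						  : policy_max_perf,
	};

	return win;
}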
include/linux/cpufreq.h

@@ -109,6 +109,12 @@ struct cpufreq_policy {
 	bool			fast_switch_possible;
 	bool			fast_switch_enabled;
 
+	/*
+	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+	 * governor.
+	 */
+	bool			strict_target;
+
 	/*
 	 * Preferred average time interval between consecutive invocations of
 	 * the driver to set the frequency for this policy. To be set by the
@@ -570,12 +576,20 @@ struct cpufreq_governor {
 					 char *buf);
 	int	(*store_setspeed)	(struct cpufreq_policy *policy,
 					 unsigned int freq);
-	/* For governors which change frequency dynamically by themselves */
-	bool			dynamic_switching;
 	struct list_head	governor_list;
 	struct module		*owner;
+	u8			flags;
 };
 
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)
+
+
 /* Pass a target to the cpufreq driver */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq);
kernel/sched/cpufreq_schedutil.c

@@ -881,7 +881,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 struct cpufreq_governor schedutil_gov = {
 	.name			= "schedutil",
 	.owner			= THIS_MODULE,
-	.dynamic_switching	= true,
+	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
 	.init			= sugov_init,
 	.exit			= sugov_exit,
 	.start			= sugov_start,