Merge branch 'pm-cpufreq'
* pm-cpufreq: (24 commits)
  cpufreq: Fix kobject memleak
  cpufreq: armada-37xx: fix frequency calculation for opp
  cpufreq: centrino: Fix centrino_setpolicy() kerneldoc comment
  cpufreq: qoriq: add support for lx2160a
  cpufreq: qoriq: Add ls1028a chip support
  cpufreq: Move ->get callback check outside of __cpufreq_get()
  cpufreq: Remove needless bios_limit check in show_bios_limit()
  drivers/cpufreq/acpi-cpufreq.c: This fixes the following checkpatch warning
  cpufreq: boost: Remove CONFIG_CPU_FREQ_BOOST_SW Kconfig option
  cpufreq: stats: Use lock by stat to replace global spin lock
  cpufreq: Remove cpufreq_driver check in cpufreq_boost_supported()
  cpufreq: maple: Remove redundant code from maple_cpufreq_init()
  cpufreq: ppc_cbe: fix possible object reference leak
  cpufreq: pmac32: fix possible object reference leak
  cpufreq/pasemi: fix possible object reference leak
  cpufreq: maple: fix possible object reference leak
  cpufreq: kirkwood: fix possible object reference leak
  cpufreq: imx6q: fix possible object reference leak
  cpufreq: ap806: fix possible object reference leak
  drivers/cpufreq: Convert some slow-path static_cpu_has() callers to boot_cpu_has()
  ...
commit 7d4a27c1c8
drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
                        acpi_processor_ppc_ost(pr->handle, 0);
        }
        if (ret >= 0)
-               cpufreq_update_policy(pr->id);
+               cpufreq_update_limits(pr->id);
 }

 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
drivers/cpufreq/Kconfig
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
        select IRQ_WORK
        bool

-config CPU_FREQ_BOOST_SW
-       bool
-       depends on THERMAL
-
 config CPU_FREQ_STAT
        bool "CPU frequency transition statistics"
        help
drivers/cpufreq/acpi-cpufreq.c
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)

        val = drv_read(data, mask);

-       pr_debug("get_cur_val = %u\n", val);
+       pr_debug("%s = %u\n", __func__, val);

        return val;
 }
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        unsigned int freq;
        unsigned int cached_freq;

-       pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+       pr_debug("%s (%d)\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        if (acpi_pstate_strict) {
                if (!check_freqs(policy, mask,
                                 policy->freq_table[index].frequency)) {
-                       pr_debug("acpi_cpufreq_target failed (%d)\n",
-                               policy->cpu);
+                       pr_debug("%s (%d)\n", __func__, policy->cpu);
                        result = -EAGAIN;
                }
        }
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
 static int __init acpi_cpufreq_early_init(void)
 {
        unsigned int i;
-       pr_debug("acpi_cpufreq_early_init\n");
+       pr_debug("%s\n", __func__);

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        static int blacklisted;
 #endif

-       pr_debug("acpi_cpufreq_cpu_init\n");
+       pr_debug("%s\n", __func__);

 #ifdef CONFIG_SMP
        if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;

-       pr_debug("acpi_cpufreq_cpu_exit\n");
+       pr_debug("%s\n", __func__);

        policy->fast_switch_possible = false;
        policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;

-       pr_debug("acpi_cpufreq_resume\n");
+       pr_debug("%s\n", __func__);

        data->resume = 1;

@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
        if (cpufreq_get_current_driver())
                return -EEXIST;

-       pr_debug("acpi_cpufreq_init\n");
+       pr_debug("%s\n", __func__);

        ret = acpi_cpufreq_early_init();
        if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)

 static void __exit acpi_cpufreq_exit(void)
 {
-       pr_debug("acpi_cpufreq_exit\n");
+       pr_debug("%s\n", __func__);

        acpi_cpufreq_boost_exit();

drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
                        PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);

        if (!pcidev) {
-               if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+               if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
                        return -ENODEV;
        }

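The static_cpu_has() to boot_cpu_has() conversions here, and in intel_pstate.c and powernow-k8.c below, share one rationale. A minimal sketch of the distinction, assuming x86 kernel context; example_init() is a hypothetical probe function, not code from this series:

/*
 * static_cpu_has() is an alternatives-patched check meant for hot paths;
 * boot_cpu_has() is a plain capability-bitmap test, which is all a one-time
 * init/probe slow path such as amd_freq_sensitivity_init() needs.
 */
#include <asm/cpufeature.h>

static int __init example_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
                return -ENODEV;         /* feature absent: bail out of probe */
        return 0;
}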
drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
        struct armada_37xx_dvfs *dvfs;
        struct platform_device *pdev;
        unsigned long freq;
-       unsigned int cur_frequency;
+       unsigned int cur_frequency, base_frequency;
        struct regmap *nb_pm_base, *avs_base;
        struct device *cpu_dev;
        int load_lvl, ret;
-       struct clk *clk;
+       struct clk *clk, *parent;

        nb_pm_base =
                syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
                return PTR_ERR(clk);
        }

+       parent = clk_get_parent(clk);
+       if (IS_ERR(parent)) {
+               dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+               clk_put(clk);
+               return PTR_ERR(parent);
+       }
+
+       /* Get parent CPU frequency */
+       base_frequency = clk_get_rate(parent);
+
+       if (!base_frequency) {
+               dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+               clk_put(clk);
+               return -EINVAL;
+       }
+
        /* Get nominal (current) CPU frequency */
        cur_frequency = clk_get_rate(clk);
        if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
        for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
             load_lvl++) {
                unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
-               freq = cur_frequency / dvfs->divider[load_lvl];
+               freq = base_frequency / dvfs->divider[load_lvl];
                ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
                if (ret)
                        goto remove_opp;
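Why the OPP calculation switches from the current rate to the parent ("base") rate: if the CPU boots already running at a divided frequency, applying the per-load-level dividers to the current rate scales every OPP down by the boot-time divider. A standalone toy program with hypothetical numbers, illustrating the bug the hunk above fixes:

#include <stdio.h>

int main(void)
{
        unsigned long base_hz = 1000000000UL;   /* parent ("base") CPU clock */
        unsigned long cur_hz = base_hz / 2;     /* CPU booted at a divided rate */
        unsigned int divider[] = { 1, 2, 4, 8 };

        for (int i = 0; i < 4; i++)
                printf("level %d: from base %lu Hz, from cur %lu Hz (wrong)\n",
                       i, base_hz / divider[i], cur_hz / divider[i]);
        return 0;
}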
drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
                of_node_put(node);
                return -ENODEV;
        }
+       of_node_put(node);

        nb_cpus = num_possible_cpus();
        freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@

 static LIST_HEAD(cpufreq_policy_list);

-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
-       return cpumask_empty(policy->cpus);
-}
-
 /* Macros to iterate over CPU policies */
 #define for_each_suitable_policy(__policy, __active)                    \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+       if (WARN_ON(!policy))
+               return;
+
+       lockdep_assert_held(&policy->rwsem);
+
+       up_write(&policy->rwsem);
+
+       cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+       if (!policy)
+               return NULL;
+
+       down_write(&policy->rwsem);
+
+       if (policy_is_inactive(policy)) {
+               cpufreq_cpu_release(policy);
+               return NULL;
+       }
+
+       return policy;
+}
+
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
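A minimal sketch, not part of the patch, of how a caller is expected to pair the two new helpers; the function name and the pr_info() call are illustrative only:

static void example_poke_policy(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

        if (!policy)
                return;         /* no active policy for this CPU */

        /* Here policy->rwsem is write-held and the refcount is elevated. */
        pr_info("cpu%u: current frequency %u kHz\n", cpu, policy->cur);

        cpufreq_cpu_release(policy);    /* drops the rwsem, then the reference */
}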
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
        return ret;
 }

-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-                             struct cpufreq_policy *new_policy);
-
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
@@ -857,11 +894,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 {
        unsigned int limit;
        int ret;
-       if (cpufreq_driver->bios_limit) {
-               ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
-               if (!ret)
-                       return sprintf(buf, "%u\n", limit);
-       }
+       ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+       if (!ret)
+               return sprintf(buf, "%u\n", limit);
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 }

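The deleted ->bios_limit check is needless because this sysfs handler is only reachable when the bios_limit attribute exists, and the attribute is only created for drivers that implement the callback. A paraphrased sketch of that attribute-creation logic, assuming the shape of the code elsewhere in cpufreq.c (the exact code may differ):

/* During policy setup: expose bios_limit only if the driver supports it. */
if (cpufreq_driver->bios_limit) {
        ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
        if (ret)
                return ret;
}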
@@ -1098,6 +1133,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                                        cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+               kobject_put(&policy->kobj);
                goto err_free_real_cpus;
        }

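This memleak fix, like the matching one in cpufreq_governor.c below, follows the standard kobject rule: once kobject_init_and_add() has been called, the kobject owns a reference even when the call fails, and only kobject_put() releases it through the ktype's release() method. A hedged sketch of the general pattern, with hypothetical names:

ret = kobject_init_and_add(&obj->kobj, &obj_ktype, parent_kobj, "name%u", id);
if (ret) {
        /* Do NOT kfree(obj) here: put the kobject and let
         * obj_ktype->release() free it. */
        kobject_put(&obj->kobj);
        return ret;
}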
@@ -1550,7 +1586,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
        unsigned int ret_freq = 0;

-       if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+       if (unlikely(policy_is_inactive(policy)))
                return ret_freq;

        ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1588,7 +1624,8 @@ unsigned int cpufreq_get(unsigned int cpu)

        if (policy) {
                down_read(&policy->rwsem);
-               ret_freq = __cpufreq_get(policy);
+               if (cpufreq_driver->get)
+                       ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
@@ -2229,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-                             struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+                      struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
        int ret;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
        struct cpufreq_policy new_policy;

        if (!policy)
                return;

-       down_write(&policy->rwsem);
-
-       if (policy_is_inactive(policy))
-               goto unlock;
-
        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
        cpufreq_set_policy(policy, &new_policy);

 unlock:
-       up_write(&policy->rwsem);
-
-       cpufreq_cpu_put(policy);
+       cpufreq_cpu_release(policy);
 }
 EXPORT_SYMBOL(cpufreq_update_policy);

+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+       if (cpufreq_driver->update_limits)
+               cpufreq_driver->update_limits(cpu);
+       else
+               cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
 /*********************************************************************
  *                          BOOST                                    *
  *********************************************************************/
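How a scaling driver opts in to the new hook (intel_pstate does so further down in this diff); the driver name and callback body here are hypothetical:

static void my_update_limits(unsigned int cpu)
{
        /* Re-evaluate firmware-imposed limits for @cpu. This runs instead
         * of the heavier cpufreq_update_policy() fallback above. */
}

static struct cpufreq_driver my_driver = {
        .name           = "my_driver",
        .update_limits  = my_update_limits,
        /* .init, .get, .target_index, ... as usual */
};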
@@ -2426,7 +2472,7 @@ int cpufreq_boost_trigger_state(int state)

 static bool cpufreq_boost_supported(void)
 {
-       return likely(cpufreq_driver) && cpufreq_driver->set_boost;
+       return cpufreq_driver->set_boost;
 }

 static int create_boost_sysfs_file(void)
drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        /* Failure, so roll back. */
        pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

+       kobject_put(&dbs_data->attr_set.kobj);
+
        policy->governor_data = NULL;

        if (!have_governor_per_policy())
drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>

-static DEFINE_SPINLOCK(cpufreq_stats_lock);

 struct cpufreq_stats {
        unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
        unsigned int state_num;
        unsigned int last_index;
        u64 *time_in_state;
+       spinlock_t lock;
        unsigned int *freq_table;
        unsigned int *trans_table;
 };
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
 {
        unsigned int count = stats->max_state;

-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        memset(stats->time_in_state, 0, count * sizeof(u64));
        memset(stats->trans_table, 0, count * count * sizeof(int));
        stats->last_time = get_jiffies_64();
        stats->total_trans = 0;
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);
 }

 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
        if (policy->fast_switch_enabled)
                return 0;

-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        cpufreq_stats_update(stats);
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);

        for (i = 0; i < stats->state_num; i++) {
                len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
        stats->state_num = i;
        stats->last_time = get_jiffies_64();
        stats->last_index = freq_table_get_index(stats, policy->cur);
+       spin_lock_init(&stats->lock);

        policy->stats = stats;
        ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
        if (old_index == -1 || new_index == -1 || old_index == new_index)
                return;

-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        cpufreq_stats_update(stats);

        stats->last_index = new_index;
        stats->trans_table[old_index * stats->max_state + new_index]++;
        stats->total_trans++;
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);
 }
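The locking change in miniature: the lock moves into the stats object, so updates for different policies no longer contend on one global lock. A standalone user-space analogue using pthreads, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

struct stats {
        pthread_mutex_t lock;   /* per-object, like stats->lock above */
        unsigned int total_trans;
};

static void record(struct stats *s)
{
        pthread_mutex_lock(&s->lock);   /* contends only on this object */
        s->total_trans++;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct stats a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct stats b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        record(&a);
        record(&b);     /* with a single global lock, these would serialize */
        printf("%u %u\n", a.total_trans, b.total_trans);
        return 0;
}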
drivers/cpufreq/freq_table.c
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);

 struct freq_attr *cpufreq_generic_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
-#ifdef CONFIG_CPU_FREQ_BOOST_SW
-       &cpufreq_freq_attr_scaling_boost_freqs,
-#endif
        NULL,
 };
 EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
drivers/cpufreq/imx6q-cpufreq.c
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                ret = imx6ul_opp_check_speed_grading(cpu_dev);
                if (ret) {
                        if (ret == -EPROBE_DEFER)
-                               return ret;
+                               goto put_node;

                        dev_err(cpu_dev, "failed to read ocotp: %d\n",
                                ret);
-                       return ret;
+                       goto put_node;
                }
        } else {
                imx6q_opp_check_speed_grading(cpu_dev);
drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
  *                     based on the MSR_IA32_MISC_ENABLE value and whether or
  *                     not the maximum reported turbo P-state is different from
  *                     the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:      Minimum capacity limit in percent of the maximum turbo
  *                     P-state capacity.
  * @max_perf_pct:      Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
 struct global_params {
        bool no_turbo;
        bool turbo_disabled;
+       bool turbo_disabled_mf;
        int max_perf_pct;
        int min_perf_pct;
 };
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
        u64 epb;
        int ret;

-       if (!static_cpu_has(X86_FEATURE_EPB))
+       if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
 {
        s16 epp;

-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * When hwp_req_data is 0, means that caller didn't read
                 * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
        u64 epb;
        int ret;

-       if (!static_cpu_has(X86_FEATURE_EPB))
+       if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;

        ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
        if (epp < 0)
                return epp;

-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (epp == HWP_EPP_PERFORMANCE)
                        return 1;
                if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
                        return 3;
                else
                        return 4;
-       } else if (static_cpu_has(X86_FEATURE_EPB)) {
+       } else if (boot_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
                 *      0x00-0x03       :       Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,

        mutex_lock(&intel_pstate_limits_lock);

-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                u64 value;

                ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
                        epp = cpu_data->epp_powersave;
        }
 update_epp:
-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                value &= ~GENMASK_ULL(31, 24);
                value |= (u64)epp << 24;
        } else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
        value |= HWP_MIN_PERF(min_perf);

        /* Set EPP/EPB to min */
-       if (static_cpu_has(X86_FEATURE_HWP_EPP))
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
        else
                intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
                cpufreq_update_policy(cpu);
 }

+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+       struct cpufreq_policy new_policy;
+       struct cpudata *cpudata;
+
+       if (!policy)
+               return;
+
+       cpudata = all_cpu_data[cpu];
+       policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+                       cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+       memcpy(&new_policy, policy, sizeof(*policy));
+       new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+       new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+       cpufreq_set_policy(policy, &new_policy);
+
+       cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+       mutex_lock(&intel_pstate_driver_lock);
+
+       update_turbo_state();
+       /*
+        * If turbo has been turned on or off globally, policy limits for
+        * all CPUs need to be updated to reflect that.
+        */
+       if (global.turbo_disabled_mf != global.turbo_disabled) {
+               global.turbo_disabled_mf = global.turbo_disabled;
+               for_each_possible_cpu(cpu)
+                       intel_pstate_update_max_freq(cpu);
+       } else {
+               cpufreq_update_policy(cpu);
+       }
+
+       mutex_unlock(&intel_pstate_driver_lock);
+}
+
 /************************** sysfs begin ************************/
 #define show_one(file_name, object) \
 static ssize_t show_##file_name \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
        /* First disable HWP notification interrupt as we don't process them */
-       if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+       if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
+       global.turbo_disabled_mf = global.turbo_disabled;
        policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
+       .update_limits  = intel_pstate_update_limits,
        .name           = "intel_pstate",
 };

@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
        .init           = intel_cpufreq_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_cpufreq_stop_cpu,
+       .update_limits  = intel_pstate_update_limits,
        .name           = "intel_cpufreq",
 };

drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
        priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
        if (IS_ERR(priv.cpu_clk)) {
                dev_err(priv.dev, "Unable to get cpuclk\n");
-               return PTR_ERR(priv.cpu_clk);
+               err = PTR_ERR(priv.cpu_clk);
+               goto out_node;
        }

        err = clk_prepare_enable(priv.cpu_clk);
        if (err) {
                dev_err(priv.dev, "Unable to prepare cpuclk\n");
-               return err;
+               goto out_node;
        }

        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_ddr;
        }

-       of_node_put(np);
-       np = NULL;
-
        err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
-       if (!err)
-               return 0;
+       if (err) {
+               dev_err(priv.dev, "Failed to register cpufreq driver\n");
+               goto out_powersave;
+       }

-       dev_err(priv.dev, "Failed to register cpufreq driver\n");
+       of_node_put(np);
+       return 0;

+out_powersave:
        clk_disable_unprepare(priv.powersave_clk);
 out_ddr:
        clk_disable_unprepare(priv.ddr_clk);
 out_cpu:
        clk_disable_unprepare(priv.cpu_clk);
+out_node:
        of_node_put(np);

        return err;
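The several "possible object reference leak" fixes in this series (kirkwood, imx6q, maple, pasemi, pmac32, ppc_cbe) all share this shape: every exit path taken after a successful of_find_*()/of_get_*() must drop the device-node reference, so error paths are funneled through goto labels that unwind in reverse acquisition order. A hedged sketch with hypothetical helpers:

static int example_probe(void)
{
        struct device_node *np = of_find_node_by_name(NULL, "cpus");
        int err;

        if (!np)
                return -ENODEV;

        err = example_setup();          /* hypothetical setup step */
        if (err)
                goto out_node;          /* failure must not leak the node */

        of_node_put(np);                /* success path drops it as well */
        return 0;

out_node:
        of_node_put(np);
        return err;
}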
drivers/cpufreq/maple-cpufreq.c
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
         */
        valp = of_get_property(cpunode, "clock-frequency", NULL);
        if (!valp)
-               return -ENODEV;
+               goto bail_noprops;
        max_freq = (*valp)/1000;
        maple_cpu_freqs[0].frequency = max_freq;
        maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)

        rc = cpufreq_register_driver(&maple_cpufreq_driver);

-       of_node_put(cpunode);
-
-       return rc;
-
 bail_noprops:
        of_node_put(cpunode);

drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)

        cpu = of_get_cpu_node(policy->cpu, NULL);

+       of_node_put(cpu);
        if (!cpu)
                goto out;

drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
        volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
        if (volt_gpio_np)
                voltage_gpio = read_gpio(volt_gpio_np);
+       of_node_put(volt_gpio_np);
        if (!voltage_gpio){
                pr_err("missing cpu-vcore-select gpio\n");
                return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
        if (volt_gpio_np)
                voltage_gpio = read_gpio(volt_gpio_np);

+       of_node_put(volt_gpio_np);
        pvr = mfspr(SPRN_PVR);
        has_cpu_l2lve = !((pvr & 0xf00) == 0x100);

drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
        unsigned int i, supported_cpus = 0;
        int ret;

-       if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+       if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
                __request_acpi_cpufreq();
                return -ENODEV;
        }
drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
            !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
                pr_info("invalid CBE regs pointers for cpufreq\n");
+               of_node_put(cpu);
                return -EINVAL;
        }

drivers/cpufreq/qoriq-cpufreq.c
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {

        { .compatible = "fsl,ls1012a-clockgen", },
        { .compatible = "fsl,ls1021a-clockgen", },
+       { .compatible = "fsl,ls1028a-clockgen", },
        { .compatible = "fsl,ls1043a-clockgen", },
        { .compatible = "fsl,ls1046a-clockgen", },
        { .compatible = "fsl,ls1088a-clockgen", },
        { .compatible = "fsl,ls2080a-clockgen", },
+       { .compatible = "fsl,lx2160a-clockgen", },
        { .compatible = "fsl,p4080-clockgen", },
        { .compatible = "fsl,qoriq-clockgen-1.0", },
        { .compatible = "fsl,qoriq-clockgen-2.0", },
drivers/cpufreq/speedstep-centrino.c
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 }

 /**
- * centrino_setpolicy - set a new CPUFreq policy
+ * centrino_target - set a new CPUFreq policy
  * @policy: new policy
  * @index: index of target frequency
  *
include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
 #endif

+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+       return cpumask_empty(policy->cpus);
+}
+
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
        return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
 void disable_cpufreq(void);

 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+                      struct cpufreq_policy *new_policy);
 void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
        /* should be defined, if possible */
        unsigned int (*get)(unsigned int cpu);

+       /* Called to update policy limits on firmware notifications. */
+       void (*update_limits)(unsigned int cpu);
+
        /* optional */
        int (*bios_limit)(int cpu, unsigned int *limit);

kernel/sched/cpufreq_schedutil.c
@@ -13,6 +13,8 @@
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>

+#define IOWAIT_BOOST_MIN       (SCHED_CAPACITY_SCALE / 8)
+
 struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
@@ -51,7 +53,6 @@ struct sugov_cpu {
        u64                     last_update;

        unsigned long           bw_dl;
-       unsigned long           min;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
  *
  * The IO wait boost of a task is disabled after a tick since the last update
  * of a CPU. If a new IO wait boost is requested after more then a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
  */
 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
@@ -303,7 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
        if (delta_ns <= TICK_NSEC)
                return false;

-       sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+       sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
@@ -317,8 +318,9 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
  *
  * Each time a task wakes up after an IO operation, the CPU utilization can be
  * boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
  *
  * To keep doubling, an IO boost has to be requested at least once per tick,
  * otherwise we restart from the utilization of the minimum OPP.
  */
@@ -349,7 +351,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
        }

        /* First wakeup after IO: start with minimum boost */
-       sg_cpu->iowait_boost = sg_cpu->min;
+       sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
 }

 /**
@@ -389,7 +391,7 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
         * No boost pending; reduce the boost value.
         */
        sg_cpu->iowait_boost >>= 1;
-       if (sg_cpu->iowait_boost < sg_cpu->min) {
+       if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                sg_cpu->iowait_boost = 0;
                return util;
        }
@@ -826,9 +828,6 @@ static int sugov_start(struct cpufreq_policy *policy)
                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu             = cpu;
                sg_cpu->sg_policy       = sg_policy;
-               sg_cpu->min             =
-                       (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
-                       policy->cpuinfo.max_freq;
        }

        for_each_cpu(cpu, policy->cpus) {
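The iowait boost dynamics after this change, as a standalone toy program: the boost starts at the fixed IOWAIT_BOOST_MIN floor (one eighth of the capacity scale, replacing the per-policy sg_cpu->min that was removed), doubles on each frequent IO wakeup up to the full scale, and is halved on updates with no boost pending until it falls below the floor and drops to zero:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE    1024
#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

int main(void)
{
        unsigned long boost = IOWAIT_BOOST_MIN;

        /* Frequent, successive IO wakeups: boost doubles, capped at max. */
        while (boost < SCHED_CAPACITY_SCALE) {
                printf("wakeup: boost = %lu\n", boost);
                boost = 2 * boost > SCHED_CAPACITY_SCALE ?
                        SCHED_CAPACITY_SCALE : 2 * boost;
        }
        printf("capped: boost = %lu\n", boost);

        /* No boost pending: halve; below the floor the boost is cleared. */
        while (boost) {
                boost >>= 1;
                if (boost < IOWAIT_BOOST_MIN)
                        boost = 0;
                printf("decay:  boost = %lu\n", boost);
        }
        return 0;
}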