kernel_optimize_test/drivers/thermal/cpu_cooling.c
Quentin Perret a4e893e802 thermal: cpu_cooling: Migrate to using the EM framework
The newly introduced Energy Model framework manages power cost tables in
a generic way. Moreover, it supports several types of models since the
tables can come from DT or firmware (through SCMI) for example. On the
other hand, the cpu_cooling subsystem manages its own power cost tables
using only DT data.

In order to avoid the duplication of data in the kernel, and in order to
enable IPA with EMs coming from more than just DT, remove the private
tables from cpu_cooling.c and migrate it to using the centralized EM
framework. Doing so should have no visible functional impact for
existing users of IPA since:

 - recent extensions to the PM_OPP infrastructure enable the
   registration of EMs in PM_EM using the DT property used by IPA;

 - the existing upstream cpufreq drivers marked with the
   'CPUFREQ_IS_COOLING_DEV' flag all use the aforementioned PM_OPP
   infrastructure, which means they all support PM_EM. The only two
   exceptions are qoriq-cpufreq which doesn't in fact use an EM and
   scmi-cpufreq which doesn't use DT for power costs.

For existing users of cpu_cooling, PM_EM tables will contain the exact
same power values that IPA used to compute on its own until now. The
only new dependency for them is to compile in CONFIG_ENERGY_MODEL.
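
As an illustration, a platform whose power numbers come from firmware
rather than DT could feed PM_EM directly. The sketch below is only an
example: it relies on the existing em_register_perf_domain()/EM_DATA_CB()
interface, and the driver hook, table and values are made up:

	#include <linux/energy_model.h>

	/* Hypothetical firmware-provided OPP/power table (kHz, mW). */
	static const struct { unsigned long khz, mw; } demo_opps[] = {
		{  500000,  80 }, { 1000000, 210 }, { 1500000, 480 },
	};

	/* EM callback: report the first OPP at or above *khz. */
	static int demo_active_power(unsigned long *mw, unsigned long *khz,
				     int cpu)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(demo_opps); i++) {
			if (demo_opps[i].khz >= *khz) {
				*khz = demo_opps[i].khz;
				*mw = demo_opps[i].mw;
				return 0;
			}
		}

		return -EINVAL;
	}

	static struct em_data_callback demo_cb = EM_DATA_CB(demo_active_power);

	static int demo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* ... usual cpufreq driver init ... */
		em_register_perf_domain(policy->related_cpus,
					ARRAY_SIZE(demo_opps), &demo_cb);
		return 0;
	}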

The case where the thermal subsystem is used without an Energy Model
(cpufreq_cooling_ops) is handled by looking directly at CPUFreq's
frequency table which is already a dependency for cpu_cooling.c anyway.
Since the thermal framework expects the cooling states in a particular
order, bail out whenever the CPUFreq table is unsorted; unsorted tables
are fairly uncommon, and there are currently no users of cpu_cooling
for that configuration.

Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: https://lore.kernel.org/r/20191030151451.7961-5-qperret@google.com
2019-11-07 07:02:40 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/thermal/cpu_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 * Copyright (C) 2012-2018 Linaro Limited.
 *
 * Authors: Amit Daniel <amit.kachhap@linaro.org>
 *          Viresh Kumar <viresh.kumar@linaro.org>
 *
 */

#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/energy_model.h>
#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */
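
/*
 * Worked example (frequencies are hypothetical): with a 4-entry ascending
 * table { 500, 1000, 1500, 2000 } MHz, max_level is 3 and:
 *
 *	state 0 --> 2000 MHz (no throttling)
 *	state 1 --> 1500 MHz
 *	state 2 --> 1000 MHz
 *	state 3 -->  500 MHz (deepest throttling)
 */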

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @idle_time: idle time stats
 * @qos_req: PM QoS request used to cap the maximum frequency
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int max_level;
	struct em_perf_domain *em;
	struct cpufreq_policy *policy;
	struct list_head node;
	struct time_in_idle *idle_time;
	struct freq_qos_request qos_req;
};

static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
			       unsigned int freq)
{
	int i;

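	/*
	 * The EM table is sorted by ascending frequency: walk it from the
	 * top and stop at the last entry still below @freq, then express
	 * the entry above it as a level counted from the top of the table.
	 */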
	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->max_level - i - 1;
}
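
/*
 * cpu_freq_to_power() and cpu_power_to_freq() are the symmetric ceiling
 * lookups in the EM table: they return the power/frequency of the first
 * capacity state at or above the requested frequency/power (clamped to the
 * highest state).
 */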
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->em->table[i + 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (power > cpufreq_cdev->em->table[i].power)
			break;
	}

	return cpufreq_cdev->em->table[i + 1].frequency;
}

/**
 * get_load() - get load for a cpu since last updated
 * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
 * @cpu: cpu number
 * @cpu_idx: index of @cpu in the cpufreq_cdev->idle_time array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;
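
	/*
	 * If the CPU was idle for the entire window, delta_idle can reach or
	 * even slightly exceed delta_time; report zero load instead of
	 * underflowing the subtraction below.
	 */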
	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: 0 on success.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       struct thermal_zone_device *tz,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
	u32 *load_cpu = NULL;

	freq = cpufreq_quick_get(policy->cpu);

	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(policy->related_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}
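
	/*
	 * Sum the load of every CPU in the policy; offline CPUs count as
	 * idle. The per-CPU breakdown is only kept for the trace event
	 * enabled above (if the allocation failed, the trace is skipped).
	 */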
	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
		if (load_cpu)
			load_cpu[i] = load;

		i++;
	}

	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
						  load_cpu, i, *power);

		kfree(load_cpu);
	}

	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state is out of
 * range.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus, idx;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Requested state should not exceed max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
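
	/*
	 * State 0 is the shallowest cooling state (highest frequency), so
	 * index the ascending EM table from its top entry downwards.
	 */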
	idx = cpufreq_cdev->max_level - state;
	freq = cpufreq_cdev->em->table[idx].frequency;
	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load. Calling this function with the same power
 * as input can yield different cooling device states depending on
 * those external factors.
 *
 * Return: 0 on success.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz, u32 power,
			       unsigned long *state)
{
	unsigned int target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
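
	/*
	 * The EM power values assume a fully loaded CPU, so scale the budget
	 * up to its 100%-load equivalent using the load recorded by the last
	 * cpufreq_get_requested_power() call (1 avoids a division by zero).
	 */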
	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}

static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
			      struct em_perf_domain *em)
{
	struct cpufreq_policy *policy;
	unsigned int nr_levels;

	if (!em)
		return false;

	policy = cpufreq_cdev->policy;
	if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) {
		pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
		       cpumask_pr_args(to_cpumask(em->cpus)),
		       cpumask_pr_args(policy->related_cpus));
		return false;
	}

	nr_levels = cpufreq_cdev->max_level + 1;
	if (em->nr_cap_states != nr_levels) {
		pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
		       cpumask_pr_args(to_cpumask(em->cpus)),
		       em->nr_cap_states, nr_levels);
		return false;
	}

	return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
				   unsigned long state)
{
	struct cpufreq_policy *policy;
	unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Use the Energy Model table if available */
	if (cpufreq_cdev->em) {
		idx = cpufreq_cdev->max_level - state;
		return cpufreq_cdev->em->table[idx].frequency;
	}
#endif

	/* Otherwise, fallback on the CPUFreq table */
	policy = cpufreq_cdev->policy;
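	/*
	 * An ascending table is indexed from the end so that state 0 still
	 * maps to the highest frequency; a descending table already has the
	 * highest frequency first. Unsorted tables are rejected at
	 * registration time.
	 */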
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		idx = cpufreq_cdev->max_level - state;
	else
		idx = state;

	return policy->freq_table[idx].frequency;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Requested state should not exceed max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	cpufreq_cdev->cpufreq_state = state;

	return freq_qos_update_request(&cpufreq_cdev->qos_req,
				       get_state_freq(cpufreq_cdev, state));
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device's device tree node
 * @policy: cpufreq policy covering the CPUs to be throttled
 * @em: Energy Model of the CPUs covered by the cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   struct cpufreq_policy *policy,
			   struct em_perf_domain *em)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int i, num_cpus;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;

	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;
	num_cpus = cpumask_weight(policy->related_cpus);
	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		cdev = ERR_PTR(ret);
		goto free_idle_time;
	}
	cpufreq_cdev->id = ret;

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_cdev->id);

	cooling_ops = &cpufreq_cooling_ops;
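
	/*
	 * With a usable Energy Model the power-aware (IPA) callbacks are
	 * plugged in; otherwise cooling states map straight onto the cpufreq
	 * frequency table, which then has to be sorted (see get_state_freq()).
	 */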
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	if (em_is_sane(cpufreq_cdev, em)) {
		cpufreq_cdev->em = em;
		cooling_ops->get_requested_power = cpufreq_get_requested_power;
		cooling_ops->state2power = cpufreq_state2power;
		cooling_ops->power2state = cpufreq_power2state;
	} else
#endif
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
		pr_err("%s: unsorted frequency tables are not supported\n",
		       __func__);
		cdev = ERR_PTR(-EINVAL);
		goto remove_ida;
	}

	ret = freq_qos_add_request(&policy->constraints,
				   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
				   get_state_freq(cpufreq_cdev, 0));
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto remove_ida;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_qos_req;

	mutex_lock(&cooling_list_lock);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	return cdev;

remove_qos_req:
	freq_qos_remove_request(&cpufreq_cdev->qos_req);

remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);

free_idle_time:
	kfree(cpufreq_cdev->idle_time);

free_cdev:
	kfree(cpufreq_cdev);

	return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Using this function, the cooling device will also implement the power
 * extensions used by the power allocator governor when an Energy Model is
 * available for the policy's CPUs. The cpus must have registered their OPPs
 * using the OPP library.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;

	if (!np) {
		pr_err("cpu_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}
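
	/*
	 * Only register when the CPU's DT node declares itself a cooling
	 * device via "#cooling-cells"; cpufreq drivers flagged
	 * CPUFREQ_IS_COOLING_DEV reach this path through the cpufreq core.
	 */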
	if (of_find_property(np, "#cooling-cells", NULL)) {
		struct em_perf_domain *em = em_cpu_get(policy->cpu);

		cdev = __cpufreq_cooling_register(np, policy, em);
		if (IS_ERR(cdev)) {
			pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_cdev->node);
	mutex_unlock(&cooling_list_lock);

	thermal_cooling_device_unregister(cdev);
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
	kfree(cpufreq_cdev->idle_time);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);