forked from luck/tmp_suning_uos_patched
cpufreq: governor: Name delayed-work as dwork
The delayed work was named 'work', so accessing the embedded work item within it required writing work.work, which is not very readable. Rename the delayed_work member to 'dwork'. Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
This commit is contained in:
parent
ba88d4338f
commit
386d46e6d5
|
@@ -105,7 +105,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
 static void cs_dbs_timer(struct work_struct *work)
 {
 	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
+			struct cs_cpu_dbs_info_s, cdbs.dwork.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
 			cpu);
@@ -165,7 +165,7 @@ static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
 {
 	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -204,7 +204,7 @@ static inline void gov_cancel_work(struct dbs_data *dbs_data,
 
 	for_each_cpu(i, policy->cpus) {
 		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-		cancel_delayed_work_sync(&cdbs->work);
+		cancel_delayed_work_sync(&cdbs->dwork);
 	}
 }
 
@@ -367,7 +367,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 		mutex_init(&j_cdbs->timer_mutex);
-		INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, cdata->gov_dbs_timer);
 	}
 
 	if (cdata->governor == GOV_CONSERVATIVE) {
@@ -142,7 +142,7 @@ struct cpu_dbs_common_info {
 	 */
 	unsigned int prev_load;
 	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
+	struct delayed_work dwork;
 	/*
 	 * percpu mutex that serializes governor limit change with gov_dbs_timer
 	 * invocation. We do not want gov_dbs_timer to run when user is changing
@@ -194,7 +194,7 @@ static void od_check_cpu(int cpu, unsigned int load)
 static void od_dbs_timer(struct work_struct *work)
 {
 	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+		container_of(work, struct od_cpu_dbs_info_s, cdbs.dwork.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
 			cpu);
@@ -275,18 +275,18 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 
 		mutex_lock(&dbs_info->cdbs.timer_mutex);
 
-		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
 			mutex_unlock(&dbs_info->cdbs.timer_mutex);
 			continue;
 		}
 
 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
-		appointed_at = dbs_info->cdbs.work.timer.expires;
+		appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
 		if (time_before(next_sampling, appointed_at)) {
 
 			mutex_unlock(&dbs_info->cdbs.timer_mutex);
-			cancel_delayed_work_sync(&dbs_info->cdbs.work);
+			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
 			mutex_lock(&dbs_info->cdbs.timer_mutex);
 
 			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
Loading…
Reference in New Issue
Block a user