sched/core: Remove the rt_avg code
rt_avg is not used anywhere anymore, so we can remove all related code.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-11-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 523e979d31
commit bbb62c0b02
kernel/sched/core.c

@@ -651,23 +651,6 @@ bool sched_can_stop_tick(struct rq *rq)
 	return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
-
-void sched_avg_update(struct rq *rq)
-{
-	s64 period = sched_avg_period();
-
-	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
-		/*
-		 * Inline assembly required to prevent the compiler
-		 * optimising this loop into a divmod call.
-		 * See __iter_div_u64_rem() for another example of this.
-		 */
-		asm("" : "+rm" (rq->age_stamp));
-		rq->age_stamp += period;
-		rq->rt_avg /= 2;
-	}
-}
-
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
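For reference, the pattern being deleted here can be written as standalone C. This is a minimal sketch only; decay_avg and its parameter names are illustrative, not kernel API:

#include <stdint.h>

/*
 * Sketch of the decay scheme the removed sched_avg_update() used:
 * once per elapsed period, advance the timestamp and halve the
 * accumulator, giving it an exponentially decaying history with a
 * half-life of one period.
 */
static void decay_avg(uint64_t now, uint64_t *stamp, uint64_t *avg,
		      int64_t period)
{
	while ((int64_t)(now - *stamp) > period) {
		/*
		 * Same trick as the kernel code above: an empty asm with
		 * a "+rm" constraint keeps the compiler from collapsing
		 * this loop into a divmod call.
		 */
		asm("" : "+rm" (*stamp));
		*stamp += period;
		*avg /= 2;	/* one halving per period */
	}
}

Called as decay_avg(now_ns, &stamp, &avg, period_ns), this mirrors the removed loop: the age_stamp/rq->rt_avg pair play the stamp/avg roles.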
@@ -5716,13 +5699,6 @@ void set_rq_offline(struct rq *rq)
 	}
 }
 
-static void set_cpu_rq_start_time(unsigned int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	rq->age_stamp = sched_clock_cpu(cpu);
-}
-
 /*
  * used to mark begin/end of suspend/resume:
  */
@@ -5840,7 +5816,6 @@ static void sched_rq_cpu_starting(unsigned int cpu)
 
 int sched_cpu_starting(unsigned int cpu)
 {
-	set_cpu_rq_start_time(cpu);
 	sched_rq_cpu_starting(cpu);
 	sched_tick_start(cpu);
 	return 0;
@@ -6108,7 +6083,6 @@ void __init sched_init(void)
 
 #ifdef CONFIG_SMP
 	idle_thread_set_boot_cpu();
-	set_cpu_rq_start_time(smp_processor_id());
 #endif
 	init_sched_fair_class();
 
kernel/sched/fair.c

@@ -5323,8 +5323,6 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 
 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
 	}
-
-	sched_avg_update(this_rq);
 }
 
 /* Used instead of source_load when we know the type == 0 */
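The kept context line shows the per-index exponential smoothing this path is really about: assuming scale = 2^i in the enclosing loop, cpu_load[i] = (old_load * (2^i - 1) + new_load) / 2^i, so for i = 1 the new sample and the old value are simply averaged, and larger i weights history more heavily. The deleted call merely piggybacked the rt_avg decay onto this periodic load-update path.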
kernel/sched/sched.h

@@ -853,8 +853,6 @@ struct rq {
 
 	struct list_head	cfs_tasks;
 
-	u64			rt_avg;
-	u64			age_stamp;
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
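Note what stays behind in struct rq: the PELT signals avg_rt and avg_dl. Earlier patches in this series (see the parent commit) moved rt_avg's consumer, scale_rt_capacity(), over to these signals, which is what turns rt_avg and its age_stamp timestamp into dead weight here.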
@@ -1719,11 +1717,6 @@ extern const_debug unsigned int sysctl_sched_time_avg;
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
-static inline u64 sched_avg_period(void)
-{
-	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
 #ifdef CONFIG_SCHED_HRTICK
 
 /*
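For scale: the removed helper returned half the averaging window in nanoseconds. Assuming the usual sysctl_sched_time_avg default of MSEC_PER_SEC (1000 ms), sched_avg_period() came to 500 ms, i.e. rt_avg was halved roughly every half second.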
@@ -1760,8 +1753,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
 #ifndef arch_scale_cpu_capacity
 static __always_inline
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1772,12 +1763,6 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
-
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
-	sched_avg_update(rq);
-}
 #else
 #ifndef arch_scale_cpu_capacity
 static __always_inline
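The removed accrual was frequency-invariant by construction: rt_delta is scaled by arch_scale_freq_capacity(), which returns SCHED_CAPACITY_SCALE (1024) at the maximum frequency. One millisecond of RT runtime at full speed therefore accrued 1 ms * 1024, while the same millisecond at half the maximum frequency accrued about 1 ms * 512, i.e. half the "work".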
@@ -1786,8 +1771,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
-static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
-static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)