sched/fair: Remove unused cfs_rq_clock_task() function
cfs_rq_clock_task() was first introduced and used in:

  f1b17280ef ("sched: Maintain runnable averages across throttled periods")

Over time its use has been gradually removed by the following commits:

  d31b1a66cb ("sched/fair: Factorize PELT update")
  2312729688 ("sched/fair: Update scale invariance of PELT")

Today, there is no single user left, so it can be safely removed.

Found via the -Wunused-function build warning.

Signed-off-by: Qian Cai <cai@lca.pw>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/1568668775-2127-1-git-send-email-cai@lca.pw
[ Rewrote the changelog. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit dac9f027b1
parent ad06219573
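For context on the warning mentioned in the changelog: -Wunused-function fires for a static function that has no callers in its translation unit, since a static function cannot be referenced from any other file. A minimal sketch (hypothetical file and names, not from this commit):

/* unused.c -- hypothetical example */
static int helper(int x)	/* static: visible only within this file */
{
	return x * 2;		/* never called below, so the definition is dead */
}

int main(void)
{
	return 0;
}

Compiling with the warning enabled reports something along the lines of:

$ gcc -Wunused-function -c unused.c
unused.c:2:12: warning: 'helper' defined but not used [-Wunused-function]

(GCC skips unused 'static inline' definitions, while Clang also flags them when they are defined in a .c file, which is presumably how an inline helper like the one removed below surfaced the warning.)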
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -749,7 +749,6 @@ void init_entity_runnable_average(struct sched_entity *se)
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
@@ -4376,15 +4375,6 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return &tg->cfs_bandwidth;
 }
 
-/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
-
-	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
-}
-
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4476,7 +4466,6 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 
 	cfs_rq->throttle_count--;
 	if (!cfs_rq->throttle_count) {
-		/* adjust cfs_rq_clock_task() */
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 	}
@@ -5080,11 +5069,6 @@ static inline bool cfs_bandwidth_used(void)
 	return false;
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-	return rq_clock_task(rq_of(cfs_rq));
-}
-
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
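For readers who want the semantics of the deleted helper rather than just the diff: cfs_rq_clock_task() returned the rq's task clock normalized against time spent throttled, so a cfs_rq's clock stopped advancing while the group was throttled. The following self-contained userspace model (invented values and simplified types, not kernel code; rq_clock_task() is replaced by a plain parameter) replays that arithmetic:

#include <stdio.h>

typedef unsigned long long u64;

/* toy stand-in for the kernel's struct cfs_rq throttling fields */
struct cfs_rq_model {
	int throttle_count;            /* > 0 while the cfs_rq is throttled */
	u64 throttled_clock_task;      /* rq task clock when throttling began */
	u64 throttled_clock_task_time; /* total time spent throttled so far */
};

/* mirrors the removed helper's logic, with the rq clock passed in */
static u64 model_clock_task(const struct cfs_rq_model *cfs_rq, u64 rq_clock)
{
	if (cfs_rq->throttle_count)
		return cfs_rq->throttled_clock_task -
		       cfs_rq->throttled_clock_task_time;

	return rq_clock - cfs_rq->throttled_clock_task_time;
}

int main(void)
{
	struct cfs_rq_model m = { 0, 0, 0 };

	printf("%llu\n", model_clock_task(&m, 100)); /* 100: running freely */

	m.throttle_count = 1;                        /* throttled at t=100 */
	m.throttled_clock_task = 100;
	printf("%llu\n", model_clock_task(&m, 120)); /* 100: clock is frozen */

	m.throttle_count = 0;                        /* unthrottled at t=130 */
	m.throttled_clock_task_time += 130 - m.throttled_clock_task;
	printf("%llu\n", model_clock_task(&m, 150)); /* 120 = 150 - 30 throttled */

	return 0;
}

The unthrottle step above is the same bookkeeping the tg_unthrottle_up() hunk leaves in place: accumulate the just-ended throttled interval into throttled_clock_task_time.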