sched/rt: Optimize cpupri_find() on non-heterogeneous systems

Introduce a new cpupri_find_fitness() function that takes the fitness_fn as
an argument; it is only called directly, with rt_task_fits_capacity as the
fitness_fn, when the sched_asym_cpucapacity static key is enabled.

cpupri_find() is now a wrapper that calls cpupri_find_fitness() passing NULL
as the fitness_fn, hence disabling the fitness-handling logic by default.
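
For illustration, a condensed user-space sketch of the resulting pattern. All
names below are hypothetical stand-ins, not the kernel's: a plain bool replaces
the sched_asym_cpucapacity static key, and the cpupri machinery is reduced to a
linear scan over a toy capacity table.

/* Hedged sketch only -- illustrative stand-ins, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int min_capacity; };              /* stand-in for task_struct */

static const int cpu_capacity[] = { 512, 512, 1024, 1024 };
#define NR_CPUS (int)(sizeof(cpu_capacity) / sizeof(cpu_capacity[0]))

/* stand-in for rt_task_fits_capacity() */
static bool task_fits(struct task *p, int cpu)
{
        return cpu_capacity[cpu] >= p->min_capacity;
}

/* Fitness-aware search: the check only runs when a callback is supplied. */
static int find_fitness(struct task *p, bool (*fitness_fn)(struct task *, int))
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (fitness_fn && !fitness_fn(p, cpu))
                        continue;
                return cpu;                     /* first acceptable CPU */
        }
        return -1;
}

/* Wrapper: a NULL fitness_fn disables the fitness logic, as cpupri_find() now does. */
static int find(struct task *p)
{
        return find_fitness(p, NULL);
}

int main(void)
{
        bool asym_cpucapacity = true;           /* stand-in for the static key */
        struct task p = { .min_capacity = 1024 };

        /* Call site mirrors find_lowest_rq(): gate the fitness path. */
        int cpu = asym_cpucapacity ? find_fitness(&p, task_fits) : find(&p);

        printf("selected CPU: %d\n", cpu);
        return 0;
}

In the kernel the gate is a static key, so on systems where
sched_asym_cpucapacity is never enabled the fitness callback is never invoked
and the branch itself is patched out.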

LINK: https://lore.kernel.org/lkml/c0772fca-0a4b-c88d-fdf2-5715fcf8447b@arm.com/
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Fixes: 804d402fb6 ("sched/rt: Make RT capacity-aware")
Link: https://lkml.kernel.org/r/20200302132721.8353-4-qais.yousef@arm.com
commit a1bd02e1f2 (parent b28bc1e002)
Author: Qais Yousef, 2020-03-02 13:27:18 +00:00, committed by Ingo Molnar
3 files changed, 31 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c

@@ -94,8 +94,14 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
         return 1;
 }
 
+int cpupri_find(struct cpupri *cp, struct task_struct *p,
+                struct cpumask *lowest_mask)
+{
+        return cpupri_find_fitness(cp, p, lowest_mask, NULL);
+}
+
 /**
- * cpupri_find - find the best (lowest-pri) CPU in the system
+ * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
@@ -111,7 +117,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
  *
  * Return: (int)bool - CPUs were found
  */
-int cpupri_find(struct cpupri *cp, struct task_struct *p,
+int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
                 struct cpumask *lowest_mask,
                 bool (*fitness_fn)(struct task_struct *p, int cpu))
 {

diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h

@@ -19,6 +19,8 @@ struct cpupri {
 
 #ifdef CONFIG_SMP
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
+                struct cpumask *lowest_mask);
+int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
                 struct cpumask *lowest_mask,
                 bool (*fitness_fn)(struct task_struct *p, int cpu));
 void cpupri_set(struct cpupri *cp, int cpu, int pri);

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c

@@ -1504,7 +1504,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
          * let's hope p can move out.
          */
         if (rq->curr->nr_cpus_allowed == 1 ||
-            !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
+            !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                 return;
 
         /*
@@ -1512,7 +1512,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
          * see if it is pushed or pulled somewhere else.
          */
         if (p->nr_cpus_allowed != 1 &&
-            cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
+            cpupri_find(&rq->rd->cpupri, p, NULL))
                 return;
 
         /*
@@ -1691,6 +1691,7 @@ static int find_lowest_rq(struct task_struct *task)
         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
         int this_cpu = smp_processor_id();
         int cpu      = task_cpu(task);
+        int ret;
 
         /* Make sure the mask is initialized first */
         if (unlikely(!lowest_mask))
@@ -1699,8 +1700,22 @@ static int find_lowest_rq(struct task_struct *task)
         if (task->nr_cpus_allowed == 1)
                 return -1; /* No other targets possible */
 
-        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
-                         rt_task_fits_capacity))
+        /*
+         * If we're on asym system ensure we consider the different capacities
+         * of the CPUs when searching for the lowest_mask.
+         */
+        if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+
+                ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
+                                          task, lowest_mask,
+                                          rt_task_fits_capacity);
+        } else {
+
+                ret = cpupri_find(&task_rq(task)->rd->cpupri,
+                                  task, lowest_mask);
+        }
+
+        if (!ret)
                 return -1; /* No targets found */
 
         /*