perf/core: Pull pmu::sched_task() into perf_event_context_sched_in()
pmu::sched_task() is a context-switch callback. It passes cpuctx->task_ctx as a parameter to the lower-level code. To find cpuctx->task_ctx, the current code iterates a cpuctx list. The same context was just iterated in perf_event_context_sched_in(), which is invoked right before pmu::sched_task(). Reusing the cpuctx->task_ctx from perf_event_context_sched_in() avoids the unnecessary iteration of the cpuctx list.

Both pmu::sched_task() and perf_event_context_sched_in() have to disable the PMU. Pulling pmu::sched_task() into perf_event_context_sched_in() also saves the overhead of the extra PMU disable and re-enable.

The new and old tasks may have equivalent contexts. The current code optimizes this case by swapping the contexts, which avoids rescheduling the events. For this case, pmu::sched_task() is still required, e.g., to restore the LBR content.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200821195754.20159-1-kan.liang@linux.intel.com
parent 35d1ce6bec
commit 556cccad38
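Before the diff itself, here is a minimal, self-contained user-space sketch of the control flow being changed. It is not kernel code: every type and function in it (cpu_ctx, ctx_sched_in, separate_sched_task_pass, pmu_disable, lbr_sched_task, ...) is a simplified stand-in invented for illustration. It only models why invoking the sched_task callback from inside the already-PMU-disabled sched-in path removes a second disable/enable pair and a second lookup of the task context.

/* Simplified stand-in types; none of these are the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

struct task_ctx { const char *comm; };

struct pmu {
        int disable_depth;                              /* counts disable/enable pairs */
        void (*sched_task)(struct task_ctx *ctx, bool sched_in);
};

struct cpu_ctx {
        struct pmu *pmu;
        struct task_ctx *task_ctx;                      /* context found during sched-in */
};

static void lbr_sched_task(struct task_ctx *ctx, bool sched_in)
{
        printf("sched_task(%s, %s)\n", ctx ? ctx->comm : "none",
               sched_in ? "in" : "out");
}

static void pmu_disable(struct pmu *pmu) { pmu->disable_depth++; }
static void pmu_enable(struct pmu *pmu)  { pmu->disable_depth--; }

/* Old shape: a separate pass that finds task_ctx again and toggles the PMU again. */
static void separate_sched_task_pass(struct cpu_ctx *cpuctx, bool sched_in)
{
        pmu_disable(cpuctx->pmu);                       /* second disable/enable pair */
        cpuctx->pmu->sched_task(cpuctx->task_ctx, sched_in);
        pmu_enable(cpuctx->pmu);
}

/* New shape: the sched-in path already holds task_ctx and a PMU-disabled section. */
static void ctx_sched_in(struct cpu_ctx *cpuctx, struct task_ctx *ctx)
{
        struct pmu *pmu = cpuctx->pmu;

        pmu_disable(pmu);
        cpuctx->task_ctx = ctx;                         /* "schedule in" the task events */
        if (pmu->sched_task)
                pmu->sched_task(cpuctx->task_ctx, true);/* reuse task_ctx, no extra pass */
        pmu_enable(pmu);
}

int main(void)
{
        struct pmu pmu = { .sched_task = lbr_sched_task };
        struct cpu_ctx cpuctx = { .pmu = &pmu };
        struct task_ctx next = { .comm = "next-task" };

        ctx_sched_in(&cpuctx, &next);           /* one disable/enable pair, one callback */
        (void)separate_sched_task_pass;         /* shown only for contrast with the old shape */
        return 0;
}

In the sketch, ctx_sched_in() plays the role of perf_event_context_sched_in() after the patch, while separate_sched_task_pass() mirrors the shape of the extra perf_pmu_sched_task() pass that the sched-in path no longer needs.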
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3491,21 +3491,14 @@ void perf_sched_cb_inc(struct pmu *pmu)
  * PEBS requires this to provide PID/TID information. This requires we flush
  * all queued PEBS records before we context switch to a new task.
  */
-static void perf_pmu_sched_task(struct task_struct *prev,
-                                struct task_struct *next,
-                                bool sched_in)
+static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in)
 {
-        struct perf_cpu_context *cpuctx;
         struct pmu *pmu;
 
-        if (prev == next)
-                return;
-
-        list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
-                pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
+        pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
 
-                if (WARN_ON_ONCE(!pmu->sched_task))
-                        continue;
+        if (WARN_ON_ONCE(!pmu->sched_task))
+                return;
 
-                perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-                perf_pmu_disable(pmu);
+        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+        perf_pmu_disable(pmu);
@@ -3514,7 +3507,20 @@ static void perf_pmu_sched_task(struct task_struct *prev,
 
-                perf_pmu_enable(pmu);
-                perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-        }
+        perf_pmu_enable(pmu);
+        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+}
+
+static void perf_pmu_sched_task(struct task_struct *prev,
+                                struct task_struct *next,
+                                bool sched_in)
+{
+        struct perf_cpu_context *cpuctx;
+
+        if (prev == next)
+                return;
+
+        list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
+                __perf_pmu_sched_task(cpuctx, sched_in);
 }
 
 static void perf_event_switch(struct task_struct *task,
@@ -3773,10 +3779,14 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
                                         struct task_struct *task)
 {
         struct perf_cpu_context *cpuctx;
+        struct pmu *pmu = ctx->pmu;
 
         cpuctx = __get_cpu_context(ctx);
-        if (cpuctx->task_ctx == ctx)
+        if (cpuctx->task_ctx == ctx) {
+                if (cpuctx->sched_cb_usage)
+                        __perf_pmu_sched_task(cpuctx, true);
                 return;
+        }
 
         perf_ctx_lock(cpuctx, ctx);
         /*
@@ -3786,7 +3796,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         if (!ctx->nr_events)
                 goto unlock;
 
-        perf_pmu_disable(ctx->pmu);
+        perf_pmu_disable(pmu);
         /*
          * We want to keep the following priority order:
          * cpu pinned (that don't need to move), task pinned,
@@ -3798,7 +3808,11 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
                 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
         perf_event_sched_in(cpuctx, ctx, task);
-        perf_pmu_enable(ctx->pmu);
+
+        if (cpuctx->sched_cb_usage && pmu->sched_task)
+                pmu->sched_task(cpuctx->task_ctx, true);
+
+        perf_pmu_enable(pmu);
 
 unlock:
         perf_ctx_unlock(cpuctx, ctx);
@@ -3841,9 +3855,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 
         if (atomic_read(&nr_switch_events))
                 perf_event_switch(task, prev, true);
-
-        if (__this_cpu_read(perf_sched_cb_usages))
-                perf_pmu_sched_task(prev, task, true);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)