sched: Add a tracepoint to track rq->nr_running
Add a bare tracepoint, trace_sched_update_nr_running_tp, which tracks
rq->nr_running for each CPU's runqueue. This is used to accurately trace this
data and provide a visualization of scheduler imbalances in, for example, the
form of a heat map. The tracepoint is accessed by loading an external kernel
module. An example module (forked from Qais' module and including the pelt
related tracepoints) can be found at:

  https://github.com/auldp/tracepoints-helpers.git

A script to turn the trace-cmd report output into a heatmap plot can be found
at:

  https://github.com/jirvoz/plot-nr-running

The tracepoints are added to add_nr_running() and sub_nr_running(), which are
in kernel/sched/sched.h. To avoid CREATE_TRACE_POINTS in the header, a wrapper
call is used and the trace/events/sched.h include is moved before the sched.h
include in kernel/sched/core.c.

Signed-off-by: Phil Auld <pauld@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200629192303.GC120228@lorien.usersys.redhat.com
parent 07bbecb341
commit 9d246053a6
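Since this is a bare tracepoint, nothing appears in tracefs by default; a
module must register a probe, as the commit message describes. Below is a
minimal sketch of such a probe module, loosely modeled on the
tracepoints-helpers repository linked above. The module and function names
here are hypothetical; register_trace_sched_update_nr_running_tp() is
generated by the DECLARE_TRACE() added in this patch, and
sched_trace_rq_cpu()/sched_trace_rq_nr_running() are the exported accessors
it pairs with:

/* Hypothetical probe module for the bare sched_update_nr_running_tp tracepoint. */
#include <linux/module.h>
#include <linux/sched.h>	/* sched_trace_rq_cpu(), sched_trace_rq_nr_running() */
#include <trace/events/sched.h>	/* register_trace_sched_update_nr_running_tp() */

/* Probes receive a void *data cookie followed by the TP_PROTO arguments. */
static void probe_nr_running(void *data, struct rq *rq, int change)
{
	/*
	 * struct rq is opaque outside the scheduler; read it via the exported
	 * helpers. change is +count from add_nr_running() and -count from
	 * sub_nr_running().
	 */
	trace_printk("cpu=%d change=%d nr_running=%d\n",
		     sched_trace_rq_cpu(rq), change,
		     sched_trace_rq_nr_running(rq));
}

static int __init nr_running_tp_init(void)
{
	return register_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
}

static void __exit nr_running_tp_exit(void)
{
	unregister_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
	tracepoint_synchronize_unregister();
}

module_init(nr_running_tp_init);
module_exit(nr_running_tp_exit);
MODULE_LICENSE("GPL");

With the module loaded, the samples land in the ftrace buffer (e.g.
/sys/kernel/debug/tracing/trace) and can be post-processed into a heat map
along the lines of the plot-nr-running script mentioned above.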
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -2044,6 +2044,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
 
 int sched_trace_rq_cpu(struct rq *rq);
+int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
@@ -642,6 +642,10 @@ DECLARE_TRACE(sched_util_est_se_tp,
 	TP_PROTO(struct sched_entity *se),
 	TP_ARGS(se));
 
+DECLARE_TRACE(sched_update_nr_running_tp,
+	TP_PROTO(struct rq *rq, int change),
+	TP_ARGS(rq, change));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -6,6 +6,10 @@
  *
  *  Copyright (C) 1991-2002  Linus Torvalds
  */
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+#undef CREATE_TRACE_POINTS
+
 #include "sched.h"
 
 #include <linux/nospec.h>
@@ -23,9 +27,6 @@
 #include "pelt.h"
 #include "smp.h"
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/sched.h>
-
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
@@ -38,6 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -8195,4 +8197,7 @@ const u32 sched_prio_to_wmult[40] = {
  /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-#undef CREATE_TRACE_POINTS
+void call_trace_sched_update_nr_running(struct rq *rq, int count)
+{
+	trace_sched_update_nr_running_tp(rq, count);
+}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ -22,8 +22,6 @@
  */
 #include "sched.h"
 
-#include <trace/events/sched.h>
-
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -11296,3 +11294,9 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
 #endif
 }
 EXPORT_SYMBOL_GPL(sched_trace_rd_span);
+
+int sched_trace_rq_nr_running(struct rq *rq)
+{
+	return rq ? rq->nr_running : -1;
+}
+EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
@@ -28,8 +28,6 @@
 #include "sched.h"
 #include "pelt.h"
 
-#include <trace/events/sched.h>
-
 /*
  * Approximate:
  *   val * y^n,		where y^32 ~= 0.5 (~1 scheduling period)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ -76,6 +76,8 @@
 #include "cpupri.h"
 #include "cpudeadline.h"
 
+#include <trace/events/sched.h>
+
 #ifdef CONFIG_SCHED_DEBUG
 # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
 #else
@@ -97,6 +99,7 @@ extern atomic_long_t calc_load_tasks;
 extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
 
+extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -1973,6 +1976,9 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	unsigned prev_nr = rq->nr_running;
 
 	rq->nr_running = prev_nr + count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, count);
+	}
 
 #ifdef CONFIG_SMP
 	if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1987,6 +1993,10 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, -count);
+	}
+
 	/* Check if we still need preemption */
 	sched_update_tick_dependency(rq);
 }