this_cpu: Use this_cpu_xx for ftrace

this_cpu_xx can reduce the instruction count here and also avoid
address arithmetic.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Tejun Heo <tj@kernel.org>

commit 9288f99aa5
parent 494f6a9e12
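For illustration only (not part of the original commit), here is a minimal before/after sketch of the conversion the diff below performs, using a hypothetical per-CPU counter named example_counter; per_cpu_var() is the prefix wrapper that per-CPU variables still required in this kernel generation.

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

/* Old pattern: a local_t counter, bumped through local_inc() on the address
 * returned by __get_cpu_var(), i.e. explicit per-CPU address arithmetic. */
DEFINE_PER_CPU(local_t, example_counter_old);

static inline void example_inc_old(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(example_counter_old));
	preempt_enable();
}

/* New pattern: a plain int, bumped through __this_cpu_inc(), which lets the
 * architecture emit a single per-CPU increment (a segment-prefixed inc on
 * x86) instead of first computing the per-CPU address. */
DEFINE_PER_CPU(int, example_counter_new);

static inline void example_inc_new(void)
{
	preempt_disable();
	__this_cpu_inc(per_cpu_var(example_counter_new));
	preempt_enable();
}

The double-underscore __this_cpu_*() forms do not themselves guard against migration, so the surrounding preempt_disable()/preempt_enable() pair stays in place, exactly as in ftrace_disable_cpu()/ftrace_enable_cpu() below.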
kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
kernel/trace/trace.h
@@ -413,7 +413,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,