tracing/function-graph-tracer: append the tracing_graph_flag
Impact: provide a way to pause the function graph tracer

As suggested by Steven Rostedt, the previous patch, which kept the spinlock
functions out of the trace, should not have used a raw_spinlock to do so.
It is much better to keep lockdep coverage with a normal spinlock, so this
patch adds a per-task flag that lets the function graph tracer be paused.
We can also emit an ftrace_printk without worrying about the irrelevant
traced spinlock appearing around the insertion.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 8e1b82e086
commit 380c4b1411
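The pair introduced below is meant to bracket tracer-internal code paths.
A minimal usage sketch (not part of this commit; do_buffer_work() is a
hypothetical helper):

/*
 * Sketch: tracing_graph_pause is an atomic counter, not a boolean,
 * so paused sections nest safely; the graph tracer resumes only
 * when the count drops back to zero.
 */
#include <linux/ftrace.h>

extern void do_buffer_work(void);	/* hypothetical helper */

static void emit_without_tracing(void)
{
	pause_graph_tracing();		/* count 0 -> 1: callees below
					 * are no longer graph-traced */
	do_buffer_work();
	unpause_graph_tracing();	/* count 1 -> 0: tracing resumes */
}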
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -476,7 +476,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 				&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (atomic_read(&in_nmi))
+	if (unlikely(atomic_read(&in_nmi)))
+		return;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
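Condensed effect of this hunk: prepare_ftrace_return() is the graph
tracer's per-call entry hook on x86, so bailing out before the return
address is redirected suppresses both the entry and the matching exit
event for the paused task. A simplified sketch of the resulting control
flow (ret_stack bookkeeping elided; not the full implementation):

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	/* NMI context remains unsupported: never touch the ret_stack. */
	if (unlikely(atomic_read(&in_nmi)))
		return;

	/* New check: a task with a nonzero pause count is skipped
	 * before its return address is hijacked, so no entry/exit
	 * pair is recorded for it. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* ... redirect *parent to return_to_handler and push the real
	 * return address onto current's ret_stack ... */
}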
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -401,6 +401,16 @@ static inline int task_curr_ret_stack(struct task_struct *t)
 {
 	return t->curr_ret_stack;
 }
+
+static inline void pause_graph_tracing(void)
+{
+	atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+	atomic_dec(&current->tracing_graph_pause);
+}
 #else
 
 #define __notrace_funcgraph
@@ -412,6 +422,9 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
 }
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
 #endif
 
 #ifdef CONFIG_TRACING
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1379,6 +1379,8 @@ struct task_struct {
 	 * because of depth overrun.
 	 */
 	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
 #endif
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 			/* Make sure IRQs see the -1 first: */
 			barrier();
 			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 		if (!t->ret_stack)
 			return;
 		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 	} else
 		t->ret_stack = NULL;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	/*
-	 * Raw Spinlock because a normal spinlock would be traced here
-	 * and append an irrelevant couple spin_lock_irqsave/
-	 * spin_unlock_irqrestore traced by ftrace around this
-	 * TRACE_PRINTK trace.
-	 */
-	static raw_spinlock_t trace_buf_lock =
-				(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(trace_buf_lock);
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
@@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	pause_graph_tracing();
+	spin_lock_irqsave(&trace_buf_lock, irq_flags);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
+	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	unpause_graph_tracing();
  out:
 	preempt_enable_notrace();
 
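Taken together, the three trace.c hunks swap trace_vprintk()'s hand-rolled
serialization for a lockdep-aware one. A condensed before/after of the
locking sequence, assembled from the hunks above:

/* Before: IRQs disabled by hand, plus a raw spinlock chosen only
 * because a normal spinlock's spin_lock_irqsave/spin_unlock_irqrestore
 * would themselves show up in the trace. */
local_irq_save(flags);
__raw_spin_lock(&trace_buf_lock);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
/* ... format and commit the entry ... */
__raw_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);

/* After: pause the graph tracer for the current task, then take an
 * ordinary spinlock; the lock functions can no longer pollute the
 * trace, and lockdep coverage is restored. */
pause_graph_tracing();
spin_lock_irqsave(&trace_buf_lock, irq_flags);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
/* ... format and commit the entry ... */
spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
unpause_graph_tracing();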