ring-buffer: Add nesting for adding events within events
The ring-buffer code has recursion protection in case tracing ends up tracing itself: if the ring buffer detects that it was called again from the same context (normal, softirq, interrupt or NMI), it does not record the event. Histogram synthetic events, however, are written while another event is being traced in the same context, so the recursion protection triggers and blocks them.

Add ring_buffer_nest_start() and ring_buffer_nest_end() to notify the ring buffer that a trace is about to happen within another trace, that this nesting is intended, and that it should not trigger the recursion blocking.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
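The kernel-doc added below spells out the calling convention: ring_buffer_nest_start() before the nested ring_buffer_lock_reserve(), ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). As a rough illustration, a caller that wants to emit one event while another event on the same buffer is still open could look like the following sketch (the helper name and payload handling are hypothetical, not part of this patch):

#include <linux/ring_buffer.h>
#include <linux/string.h>

/* Hypothetical helper: emit a nested event while an outer event is still open. */
static void write_nested_event(struct ring_buffer *buffer,
			       void *payload, unsigned long len)
{
	struct ring_buffer_event *event;

	/* Shift this CPU's recursion-check bits so the nested reserve is allowed. */
	ring_buffer_nest_start(buffer);

	event = ring_buffer_lock_reserve(buffer, len);
	if (event) {
		memcpy(ring_buffer_event_data(event), payload, len);
		ring_buffer_unlock_commit(buffer, event);
	}

	/* Restore the recursion-check bits (and re-enable preemption). */
	ring_buffer_nest_end(buffer);
}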
parent a4072fe85b
commit 8e012066fe
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -117,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 
+void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct ring_buffer *buffer);
+
 struct ring_buffer_event *
 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
 		 unsigned long *lost_events);
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -477,6 +477,7 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*reader_page;
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
+	unsigned long			nest;
 	local_t				entries_bytes;
 	local_t				entries;
 	local_t				overrun;
@@ -2624,10 +2625,10 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 		bit = pc & NMI_MASK ? RB_CTX_NMI :
 			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
-	if (unlikely(val & (1 << bit)))
+	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
 		return 1;
 
-	val |= (1 << bit);
+	val |= (1 << (bit + cpu_buffer->nest));
 	cpu_buffer->current_context = val;
 
 	return 0;
@@ -2636,7 +2637,57 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+	cpu_buffer->current_context &=
+		cpu_buffer->current_context - (1 << cpu_buffer->nest);
 }
 
+/* The recursive locking above uses 4 bits */
+#define NESTED_BITS 4
+
+/**
+ * ring_buffer_nest_start - Allow to trace while nested
+ * @buffer: The ring buffer to modify
+ *
+ * The ring buffer has a safety mechanism to prevent recursion.
+ * But there may be a case where a trace needs to be done while
+ * tracing something else. In this case, calling this function
+ * will allow this function to nest within a currently active
+ * ring_buffer_lock_reserve().
+ *
+ * Call this function before calling another ring_buffer_lock_reserve() and
+ * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_start(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu;
+
+	/* Enabled by ring_buffer_nest_end() */
+	preempt_disable_notrace();
+	cpu = raw_smp_processor_id();
+	cpu_buffer = buffer->buffers[cpu];
+	/* This is the shift value for the above recursive locking */
+	cpu_buffer->nest += NESTED_BITS;
+}
+
+/**
+ * ring_buffer_nest_end - Allow to trace while nested
+ * @buffer: The ring buffer to modify
+ *
+ * Must be called after ring_buffer_nest_start() and after the
+ * ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_end(struct ring_buffer *buffer)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	int cpu;
+
+	/* disabled by ring_buffer_nest_start() */
+	cpu = raw_smp_processor_id();
+	cpu_buffer = buffer->buffers[cpu];
+	/* This is the shift value for the above recursive locking */
+	cpu_buffer->nest -= NESTED_BITS;
+	preempt_enable_notrace();
+}
+
 /**
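For readers following the bit arithmetic: trace_recursive_lock() keeps one bit per context (normal, softirq, irq, NMI) in current_context, and cpu_buffer->nest only shifts which group of four bits the current reserve/commit pair uses, so a nested event never collides with the bit already taken by the outer event. A standalone sketch of the same mask logic (plain userspace C, names mine, not kernel code):

#include <stdio.h>

#define NESTED_BITS 4	/* each nesting level gets its own group of four context bits */

enum { RB_CTX_NORMAL, RB_CTX_SOFTIRQ, RB_CTX_IRQ, RB_CTX_NMI };

static unsigned int current_context;	/* stands in for cpu_buffer->current_context */
static unsigned int nest;		/* stands in for cpu_buffer->nest */

/* Mirrors trace_recursive_lock(): return 1 if this context already holds a reserve. */
static int recursive_lock(unsigned int bit)
{
	unsigned int val = current_context;

	if (val & (1u << (bit + nest)))
		return 1;
	current_context = val | (1u << (bit + nest));
	return 0;
}

/* Mirrors trace_recursive_unlock(): clear the lowest bit set at or above `nest`. */
static void recursive_unlock(void)
{
	current_context &= current_context - (1u << nest);
}

int main(void)
{
	recursive_lock(RB_CTX_NORMAL);					/* outer event takes bit 0 */
	printf("blocked without nest: %d\n", recursive_lock(RB_CTX_NORMAL));	/* 1: refused */

	nest += NESTED_BITS;						/* ring_buffer_nest_start() */
	printf("allowed with nest:    %d\n", recursive_lock(RB_CTX_NORMAL));	/* 0: uses bit 4 */

	recursive_unlock();						/* clears bit 4, keeps bit 0 */
	nest -= NESTED_BITS;						/* ring_buffer_nest_end() */
	recursive_unlock();						/* clears bit 0 */
	printf("context mask now:     0x%x\n", current_context);	/* 0x0 */
	return 0;
}

The unlock arithmetic works because subtracting (1 << nest) borrows upward through the zero bits until it reaches the lowest set bit at or above the nest offset; the AND then clears exactly that bit while leaving the outer context's lower bits untouched.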