tracing: Add tracefs file buffer_percentage

Add a "buffer_percentage" file, that allows users to specify how much of the
buffer (percentage of pages) need to be filled before waking up a task
blocked on a per cpu trace_pipe_raw file.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

Author: Steven Rostedt (VMware)
Date:   2018-11-29 21:38:42 -05:00
commit 03329f9939
parent 2c2b0a78b3

3 changed files with 77 additions and 17 deletions
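
For context, a minimal userspace sketch (not part of the patch) of how the new
knob is meant to be used: raise buffer_percent, then block on a per-CPU
trace_pipe_raw reader. In this patch only the splice path waits on the
percentage, so the sketch uses splice(). It assumes the default tracefs mount
at /sys/kernel/tracing, a root user, and omits error handling.

	/* Userspace sketch, illustrative only. */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int pfd[2], fd;
		ssize_t n;

		/* Wake blocked readers only once ~50% of the buffer pages are dirty. */
		fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
		write(fd, "50", 2);
		close(fd);

		/* trace_pipe_raw is consumed with splice(), as trace-cmd does. */
		fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
		pipe(pfd);

		/* Blocks until the buffer_percent threshold is crossed. */
		n = splice(fd, NULL, pfd[1], NULL, 4096, 0);
		printf("spliced %zd bytes of raw ring-buffer data\n", n);
		return 0;
	}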

kernel/trace/ring_buffer.c

@@ -489,6 +489,7 @@ struct ring_buffer_per_cpu {
 	local_t			commits;
 	local_t			pages_touched;
 	local_t			pages_read;
+	long			last_pages_touch;
 	size_t			shortest_full;
 	unsigned long		read;
 	unsigned long		read_bytes;
@@ -2632,7 +2633,9 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-	bool pagebusy;
+	size_t nr_pages;
+	size_t dirty;
+	size_t full;
 
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
@@ -2646,24 +2649,27 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 		irq_work_queue(&cpu_buffer->irq_work.work);
 	}
 
-	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
+		return;
 
-	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
-		size_t nr_pages;
-		size_t dirty;
-		size_t full;
+	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
+		return;
+
+	if (!cpu_buffer->irq_work.full_waiters_pending)
+		return;
+
+	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-		full = cpu_buffer->shortest_full;
-		nr_pages = cpu_buffer->nr_pages;
-		dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
-		if (full && nr_pages && (dirty * 100) <= full * nr_pages)
-			return;
+	full = cpu_buffer->shortest_full;
+	nr_pages = cpu_buffer->nr_pages;
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+		return;
 
-		cpu_buffer->irq_work.wakeup_full = true;
-		cpu_buffer->irq_work.full_waiters_pending = false;
-		/* irq_work_queue() supplies it's own memory barriers */
-		irq_work_queue(&cpu_buffer->irq_work.work);
-	}
+	cpu_buffer->irq_work.wakeup_full = true;
+	cpu_buffer->irq_work.full_waiters_pending = false;
+	/* irq_work_queue() supplies it's own memory barriers */
+	irq_work_queue(&cpu_buffer->irq_work.work);
 }
 
 /*
@@ -4394,6 +4400,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->commits, 0);
 	local_set(&cpu_buffer->pages_touched, 0);
 	local_set(&cpu_buffer->pages_read, 0);
+	cpu_buffer->last_pages_touch = 0;
 	cpu_buffer->shortest_full = 0;
 	cpu_buffer->read = 0;
 	cpu_buffer->read_bytes = 0;
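
The rewritten rb_wakeups() above wakes "full" waiters only when the dirty-page
percentage exceeds the requested threshold, and it avoids a division by
comparing dirty * 100 against full * nr_pages. A standalone sketch of just that
comparison, with made-up numbers, shows where the boundary falls (illustrative
only, not kernel code):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Same comparison as rb_wakeups(): stay quiet while the dirty
	 * percentage is at or below the requested "full" percentage. */
	static bool wake_full_waiters(size_t dirty, size_t nr_pages, size_t full)
	{
		if (full && nr_pages && (dirty * 100) <= full * nr_pages)
			return false;
		return true;
	}

	int main(void)
	{
		/* 128-page buffer, 50% threshold: 64 dirty pages (exactly 50%)
		 * does not wake, 65 pages does. */
		printf("%d\n", wake_full_waiters(64, 128, 50));	/* 0 */
		printf("%d\n", wake_full_waiters(65, 128, 50));	/* 1 */
		/* full == 0 means no percentage was requested: always wake. */
		printf("%d\n", wake_full_waiters(1, 128, 0));	/* 1 */
		return 0;
	}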

kernel/trace/trace.c

@@ -6948,7 +6948,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
 			goto out;
 
-		ret = wait_on_pipe(iter, 1);
+		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
 		if (ret)
 			goto out;
@@ -7662,6 +7662,53 @@ static const struct file_operations rb_simple_fops = {
 	.llseek		= default_llseek,
 };
 
+static ssize_t
+buffer_percent_read(struct file *filp, char __user *ubuf,
+		    size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	char buf[64];
+	int r;
+
+	r = tr->buffer_percent;
+	r = sprintf(buf, "%d\n", r);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+buffer_percent_write(struct file *filp, const char __user *ubuf,
+		     size_t cnt, loff_t *ppos)
+{
+	struct trace_array *tr = filp->private_data;
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	if (val > 100)
+		return -EINVAL;
+
+	if (!val)
+		val = 1;
+
+	tr->buffer_percent = val;
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static const struct file_operations buffer_percent_fops = {
+	.open		= tracing_open_generic_tr,
+	.read		= buffer_percent_read,
+	.write		= buffer_percent_write,
+	.release	= tracing_release_generic_tr,
+	.llseek		= default_llseek,
+};
+
 struct dentry *trace_instance_dir;
 
 static void
@@ -7970,6 +8017,11 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
 			  &trace_time_stamp_mode_fops);
 
+	tr->buffer_percent = 1;
+	trace_create_file("buffer_percent", 0444, d_tracer,
+			tr, &buffer_percent_fops);
+
 	create_trace_options_dir(tr);
 
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
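
To make the write handler's accepted range concrete, a small userspace sketch
(again not part of the patch; same tracefs path and root assumptions as before):
values above 100 fail with EINVAL, and a write of 0 is silently stored as 1.

	/* Illustrative only: exercise buffer_percent's input validation. */
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);

		if (write(fd, "150", 3) < 0)		/* > 100 is rejected */
			printf("150 rejected: %s\n", strerror(errno));	/* EINVAL */

		if (write(fd, "0", 1) == 1)		/* 0 is accepted... */
			printf("0 accepted (kernel stores 1)\n");	/* ...as 1 */

		close(fd);
		return 0;
	}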

kernel/trace/trace.h

@@ -247,6 +247,7 @@ struct trace_array {
 	int			clock_id;
 	int			nr_topts;
 	bool			clear_trace;
+	int			buffer_percent;
 	struct tracer		*current_trace;
 	unsigned int		trace_flags;
 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];