tracing: kdb: Allow ftdump to skip all but the last few entries

The 'ftdump' command in kdb is currently a bit of a last resort, at
least if you have lots of traces turned on.  It's going to print a
whole boatload of data out of your serial port, which is probably
running at 115200 baud.  This could easily take many, many minutes.
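
For a rough sense of scale (back-of-the-envelope numbers, not a
measurement from this patch): 115200 baud with 8N1 framing moves about
11,520 bytes/sec, so a few megabytes of trace text takes on the order
of 4 * 1024 * 1024 / 11520 ~= 364 seconds, or about six minutes.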

Usually you're most interested in what's at the _end_ of the ftrace
buffer, AKA what happened most recently, but you've still got to wait
through the full dump to get there.  The 'ftdump' command does attempt
to help you a little bit by allowing you to skip a fixed number of
entries; unfortunately, it provides no way for you to know how many
entries you should skip.

Let's do something similar to Python and allow a negative number to
indicate that you want to skip all entries except the last few.  This
allows you to quickly see just what you want.
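
For example, to see only the last five entries (a made-up session; the
skipped count depends on how full your buffer is):

  [0]kdb> ftdump -5
  Dumping ftrace buffer:
  (skipping 32763 entries)
  ...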

Note that we also change the printout in ftdump to report the
(positive) number of entries actually skipped, since that's helpful to
know when you've specified a negative skip count.

Link: http://lkml.kernel.org/r/20190319171206.97107-3-dianders@chromium.org

Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,29 +17,25 @@
 #include "trace.h"
 #include "trace_output.h"
 
+static struct trace_iterator iter;
+static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
+
 static void ftrace_dump_buf(int skip_entries, long cpu_file)
 {
-	/* use static because iter can be a bit big for the stack */
-	static struct trace_iterator iter;
-	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
 	struct trace_array *tr;
 	unsigned int old_userobj;
 	int cnt = 0, cpu;
 
-	trace_init_global_iter(&iter);
-	iter.buffer_iter = buffer_iter;
 	tr = iter.tr;
-
-	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
-	}
 
 	old_userobj = tr->trace_flags;
 
 	/* don't look at user memory in panic mode */
 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
 	kdb_printf("Dumping ftrace buffer:\n");
+	if (skip_entries)
+		kdb_printf("(skipping %d entries)\n", skip_entries);
 
 	/* reset all but tr, trace, and overruns */
 	memset(&iter.seq, 0,
@@ -89,10 +85,6 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
 out:
 	tr->trace_flags = old_userobj;
 
-	for_each_tracing_cpu(cpu) {
-		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
-	}
-
 	for_each_tracing_cpu(cpu) {
 		if (iter.buffer_iter[cpu]) {
 			ring_buffer_read_finish(iter.buffer_iter[cpu]);
@@ -109,6 +101,8 @@ static int kdb_ftdump(int argc, const char **argv)
 	int skip_entries = 0;
 	long cpu_file;
 	char *cp;
+	int cnt;
+	int cpu;
 
 	if (argc > 2)
 		return KDB_ARGCOUNT;
@@ -129,7 +123,29 @@ static int kdb_ftdump(int argc, const char **argv)
 	}
 
 	kdb_trap_printk++;
+
+	trace_init_global_iter(&iter);
+	iter.buffer_iter = buffer_iter;
+
+	for_each_tracing_cpu(cpu) {
+		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+	}
+
+	/* A negative skip_entries means skip all but the last entries */
+	if (skip_entries < 0) {
+		if (cpu_file == RING_BUFFER_ALL_CPUS)
+			cnt = trace_total_entries(NULL);
+		else
+			cnt = trace_total_entries_cpu(NULL, cpu_file);
+		skip_entries = max(cnt + skip_entries, 0);
+	}
+
 	ftrace_dump_buf(skip_entries, cpu_file);
+
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+	}
+
 	kdb_trap_printk--;
 	return 0;
@@ -138,7 +154,8 @@ static int kdb_ftdump(int argc, const char **argv)
 static __init int kdb_ftrace_register(void)
 {
 	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
-			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
+			    "Dump ftrace log; -skip dumps last #entries", 0,
+			    KDB_ENABLE_ALWAYS_SAFE);
 	return 0;
 }
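
To see the effect of the clamping logic in isolation, here's a minimal
userspace sketch.  resolve_skip() and the entry counts are made up for
illustration; in the kernel the count comes from trace_total_entries()
or trace_total_entries_cpu() as in the hunk above.

  #include <stdio.h>

  /* Mirrors the patch's "skip_entries = max(cnt + skip_entries, 0)":
   * a negative skip count means "keep only the last -skip_entries
   * entries", clamped so we never skip a negative number. */
  static int resolve_skip(int skip_entries, int total_entries)
  {
  	if (skip_entries < 0) {
  		skip_entries += total_entries;
  		if (skip_entries < 0)
  			skip_entries = 0;
  	}
  	return skip_entries;
  }

  int main(void)
  {
  	printf("%d\n", resolve_skip(-5, 1000));    /* skips 995, shows last 5 */
  	printf("%d\n", resolve_skip(-2000, 1000)); /* clamps to 0, shows all */
  	printf("%d\n", resolve_skip(10, 1000));    /* positive counts unchanged */
  	return 0;
  }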