Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
commit 931da6137e
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace)
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);

-void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 {
         dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
         if (trace->nr_entries < trace->max_entries)

@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
         e->trace.entries = e->trace_entries;
         e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
         e->trace.skip = 0;
-        save_stack_trace_regs(&e->trace, regs);
+        save_stack_trace_regs(regs, &e->trace);

         /* Round address down to nearest 16 bytes */
         shadow_copy = kmemcheck_shadow_lookup(address

@@ -129,6 +129,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+                                            struct ring_buffer_event *event,
+                                            unsigned long flags, int pc,
+                                            struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                          struct ring_buffer_event *event);

@@ -169,7 +169,7 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
 size_t ring_buffer_page_len(void *page);


-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
                           size_t len, int cpu, int full);

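Aside (not part of the diff): with the signature change above, a reader now tells ring_buffer_alloc_read_page() which CPU the spare page will be used for, so the page can come from that CPU's node. A minimal hedged sketch of the read path, built only from the declarations shown here (names and error handling are illustrative):

        void *spare;
        int ret;

        /* allocate the spare page for the CPU we are going to read */
        spare = ring_buffer_alloc_read_page(buffer, cpu);
        if (spare) {
                /* swap up to a page of events into 'spare'; full=0 allows partial reads */
                ret = ring_buffer_read_page(buffer, &spare, PAGE_SIZE, cpu, 0);
                /* ret >= 0: 'spare' now holds a buffer_data_page worth of events */
                ring_buffer_free_read_page(buffer, spare);
        }
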
@@ -14,8 +14,8 @@ struct stack_trace {
 };

 extern void save_stack_trace(struct stack_trace *trace);
-extern void save_stack_trace_regs(struct stack_trace *trace,
-                                  struct pt_regs *regs);
+extern void save_stack_trace_regs(struct pt_regs *regs,
+                                  struct stack_trace *trace);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
                                  struct stack_trace *trace);

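Aside (not part of the diff): the hunks above swap the argument order of save_stack_trace_regs() so it matches save_stack_trace_tsk() — the source of the trace (pt_regs) first, the destination (stack_trace) second. A hedged sketch of a caller after the change (illustrative only, not from the patch):

        static void example_snapshot(struct pt_regs *regs)
        {
                static unsigned long entries[16];
                struct stack_trace trace = {
                        .entries     = entries,
                        .max_entries = ARRAY_SIZE(entries),
                        .skip        = 0,
                };

                save_stack_trace_regs(regs, &trace);    /* regs first, trace second */
        }
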
@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel.
  */

 #include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <asm/atomic.h>

 static async_cookie_t next_cookie = 1;

@@ -128,7 +129,8 @@ static void async_run_entry_fn(struct work_struct *work)

         /* 2) run (and print duration) */
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
-                printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+                printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
+                        (long long)entry->cookie,
                         entry->func, task_pid_nr(current));
                 calltime = ktime_get();
         }
@@ -136,7 +138,7 @@ static void async_run_entry_fn(struct work_struct *work)
         if (initcall_debug && system_state == SYSTEM_BOOTING) {
                 rettime = ktime_get();
                 delta = ktime_sub(rettime, calltime);
-                printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+                printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
                         (long long)entry->cookie,
                         entry->func,
                         (long long)ktime_to_ns(delta) >> 10);
@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
         ktime_t starttime, delta, endtime;

         if (initcall_debug && system_state == SYSTEM_BOOTING) {
-                printk("async_waiting @ %i\n", task_pid_nr(current));
+                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                 starttime = ktime_get();
         }

@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
                 endtime = ktime_get();
                 delta = ktime_sub(endtime, starttime);

-                printk("async_continuing @ %i after %lli usec\n",
+                printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
                         task_pid_nr(current),
                         (long long)ktime_to_ns(delta) >> 10);
         }

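Aside (not part of the diff): the async.c hunks only change the printk level to KERN_DEBUG; the timing idiom around them is unchanged. A hedged sketch of that idiom in isolation (the >> 10 converts nanoseconds to rough microseconds, as in the code above; entry->func/data/cookie are taken from the surrounding context):

        ktime_t calltime, rettime, delta;

        calltime = ktime_get();
        entry->func(entry->data, entry->cookie);        /* the timed call */
        rettime  = ktime_get();
        delta    = ktime_sub(rettime, calltime);
        printk(KERN_DEBUG "took %lld usecs\n",
               (long long)ktime_to_ns(delta) >> 10);
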
@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 EXPORT_SYMBOL_GPL(print_stack_trace);

 /*
- * Architectures that do not implement save_stack_trace_tsk get this
- * weak alias and a once-per-bootup warning (whenever this facility
- * is utilized - for example by procfs):
+ * Architectures that do not implement save_stack_trace_tsk or
+ * save_stack_trace_regs get this weak alias and a once-per-bootup warning
+ * (whenever this facility is utilized - for example by procfs):
  */
 __weak void
 save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
         WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
 }
+
+__weak void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+        WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
+}

@@ -32,7 +32,6 @@
 #include <trace/events/sched.h>

-#include <asm/ftrace.h>
 #include <asm/setup.h>

 #include "trace_output.h"

@@ -82,8 +81,7 @@ static int ftrace_disabled __read_mostly;

 static DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops ftrace_list_end __read_mostly =
-{
+static struct ftrace_ops ftrace_list_end __read_mostly = {
         .func = ftrace_stub,
 };

@@ -785,8 +783,7 @@ static void unregister_ftrace_profiler(void)
         unregister_ftrace_graph();
 }
 #else
-static struct ftrace_ops ftrace_profile_ops __read_mostly =
-{
+static struct ftrace_ops ftrace_profile_ops __read_mostly = {
         .func = function_profile_call,
 };

@@ -806,19 +803,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
         unsigned long val;
-        char buf[64];           /* big enough to hold a number */
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         val = !!val;

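Aside (not part of the diff): the last hunk above is the first of several identical conversions in this series. kstrtoul_from_user() copies from the user buffer, NUL-terminates and parses it in one call, so the local buf[64], the length check and copy_from_user() all go away. A hedged sketch of a debugfs-style write handler using it (names are illustrative):

        static ssize_t example_write(struct file *filp, const char __user *ubuf,
                                     size_t cnt, loff_t *ppos)
        {
                unsigned long val;
                int ret;

                ret = kstrtoul_from_user(ubuf, cnt, 10, &val);  /* copy + parse */
                if (ret)
                        return ret;     /* -EFAULT or -EINVAL */

                val = !!val;            /* any non-zero value means "enable" */
                *ppos += cnt;
                return cnt;
        }
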
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                              unsigned nr_pages)
 {
         struct buffer_page *bpage, *tmp;
-        unsigned long addr;
         LIST_HEAD(pages);
         unsigned i;

         WARN_ON(!nr_pages);

         for (i = 0; i < nr_pages; i++) {
+                struct page *page;
+                /*
+                 * __GFP_NORETRY flag makes sure that the allocation fails
+                 * gracefully without invoking oom-killer and the system is
+                 * not destabilized.
+                 */
                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-                                     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+                                     GFP_KERNEL | __GFP_NORETRY,
+                                     cpu_to_node(cpu_buffer->cpu));
                 if (!bpage)
                         goto free_pages;

@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,

                 list_add(&bpage->list, &pages);

-                addr = __get_free_page(GFP_KERNEL);
-                if (!addr)
+                page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+                                        GFP_KERNEL | __GFP_NORETRY, 0);
+                if (!page)
                         goto free_pages;
-                bpage->page = (void *)addr;
+                bpage->page = page_address(page);
                 rb_init_page(bpage->page);
         }

@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
         struct buffer_page *bpage;
-        unsigned long addr;
+        struct page *page;
         int ret;

         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
         rb_check_bpage(cpu_buffer, bpage);

         cpu_buffer->reader_page = bpage;
-        addr = __get_free_page(GFP_KERNEL);
-        if (!addr)
+        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+        if (!page)
                 goto fail_free_reader;
-        bpage->page = (void *)addr;
+        bpage->page = page_address(page);
         rb_init_page(bpage->page);

         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         unsigned nr_pages, rm_pages, new_pages;
         struct buffer_page *bpage, *tmp;
         unsigned long buffer_size;
-        unsigned long addr;
         LIST_HEAD(pages);
         int i, cpu;

@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)

         for_each_buffer_cpu(buffer, cpu) {
                 for (i = 0; i < new_pages; i++) {
+                        struct page *page;
+                        /*
+                         * __GFP_NORETRY flag makes sure that the allocation
+                         * fails gracefully without invoking oom-killer and
+                         * the system is not destabilized.
+                         */
                         bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                    cache_line_size()),
-                                             GFP_KERNEL, cpu_to_node(cpu));
+                                             GFP_KERNEL | __GFP_NORETRY,
+                                             cpu_to_node(cpu));
                         if (!bpage)
                                 goto free_pages;
                         list_add(&bpage->list, &pages);
-                        addr = __get_free_page(GFP_KERNEL);
-                        if (!addr)
+                        page = alloc_pages_node(cpu_to_node(cpu),
+                                                GFP_KERNEL | __GFP_NORETRY, 0);
+                        if (!page)
                                 goto free_pages;
-                        bpage->page = (void *)addr;
+                        bpage->page = page_address(page);
                         rb_init_page(bpage->page);
                 }
         }
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
         struct buffer_data_page *bpage;
-        unsigned long addr;
+        struct page *page;

-        addr = __get_free_page(GFP_KERNEL);
-        if (!addr)
+        page = alloc_pages_node(cpu_to_node(cpu),
+                                GFP_KERNEL | __GFP_NORETRY, 0);
+        if (!page)
                 return NULL;

-        bpage = (void *)addr;
+        bpage = page_address(page);

         rb_init_page(bpage);

@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                 size_t cnt, loff_t *ppos)
 {
         unsigned long *p = filp->private_data;
-        char buf[64];
         unsigned long val;
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         if (val)

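Aside (not part of the diff): the ring-buffer hunks above replace __get_free_page() with alloc_pages_node() so each per-CPU buffer page is allocated on that CPU's NUMA node, and add __GFP_NORETRY so an oversized resize fails cleanly instead of invoking the OOM killer. A hedged sketch of the allocation idiom (illustrative only):

        struct page *page;
        void *data;

        page = alloc_pages_node(cpu_to_node(cpu),
                                GFP_KERNEL | __GFP_NORETRY, 0);
        if (!page)
                return NULL;            /* fail gracefully, no OOM kill */
        data = page_address(page);      /* kernel mapping of the page */
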
@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu)
         int inc;
         int i;

-        bpage = ring_buffer_alloc_read_page(buffer);
+        bpage = ring_buffer_alloc_read_page(buffer, cpu);
         if (!bpage)
                 return EVENT_DROPPED;

@@ -343,26 +343,27 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);

+static void wakeup_work_handler(struct work_struct *work)
+{
+        wake_up(&trace_wait);
+}
+
+static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
- * Simply wakes up any task that is blocked on the trace_wait
- * queue. These is used with trace_poll for tasks polling the trace.
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. These is used with trace_poll for tasks polling the
+ * trace.
  */
 void trace_wake_up(void)
 {
-        int cpu;
+        const unsigned long delay = msecs_to_jiffies(2);

         if (trace_flags & TRACE_ITER_BLOCK)
                 return;
-        /*
-         * The runqueue_is_locked() can fail, but this is the best we
-         * have for now:
-         */
-        cpu = get_cpu();
-        if (!runqueue_is_locked(cpu))
-                wake_up(&trace_wait);
-        put_cpu();
+        schedule_delayed_work(&wakeup_work, delay);
 }

 static int __init set_buf_size(char *str)
@@ -424,6 +425,7 @@ static const char *trace_options[] = {
         "graph-time",
         "record-cmd",
         "overwrite",
+        "disable_on_free",
         NULL
 };

@@ -1191,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+                                            struct ring_buffer_event *event,
+                                            unsigned long flags, int pc,
+                                            struct pt_regs *regs)
+{
+        ring_buffer_unlock_commit(buffer, event);
+
+        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+        ftrace_trace_userstack(buffer, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                          struct ring_buffer_event *event)
 {
@@ -1236,7 +1250,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                  unsigned long flags,
-                                 int skip, int pc)
+                                 int skip, int pc, struct pt_regs *regs)
 {
         struct ftrace_event_call *call = &event_kernel_stack;
         struct ring_buffer_event *event;
@@ -1255,24 +1269,36 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         trace.skip = skip;
         trace.entries = entry->caller;

-        save_stack_trace(&trace);
+        if (regs)
+                save_stack_trace_regs(regs, &trace);
+        else
+                save_stack_trace(&trace);
         if (!filter_check_discard(call, entry, buffer, event))
                 ring_buffer_unlock_commit(buffer, event);
 }

+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+                             int skip, int pc, struct pt_regs *regs)
+{
+        if (!(trace_flags & TRACE_ITER_STACKTRACE))
+                return;
+
+        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
+}
+
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                         int skip, int pc)
 {
         if (!(trace_flags & TRACE_ITER_STACKTRACE))
                 return;

-        __ftrace_trace_stack(buffer, flags, skip, pc);
+        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
 }

 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                    int pc)
 {
-        __ftrace_trace_stack(tr->buffer, flags, skip, pc);
+        __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
 }

 /**

@@ -1288,7 +1314,7 @@ void trace_dump_stack(void)
         local_save_flags(flags);

         /* skipping 3 traces, seems to get us at the caller of this function */
-        __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+        __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
 }

 static DEFINE_PER_CPU(int, user_stack_count);
@@ -2051,6 +2077,9 @@ void trace_default_header(struct seq_file *m)
 {
         struct trace_iterator *iter = m->private;

+        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+                return;
+
         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                 /* print nothing if the buffers are empty */
                 if (trace_empty(iter))
@@ -2701,20 +2730,11 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
 {
         struct trace_array *tr = filp->private_data;
-        char buf[64];
         unsigned long val;
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         val = !!val;
@@ -2767,7 +2787,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
         return t->init(tr);
 }

-static int tracing_resize_ring_buffer(unsigned long size)
+static int __tracing_resize_ring_buffer(unsigned long size)
 {
         int ret;

@@ -2819,6 +2839,41 @@ static int __tracing_resize_ring_buffer(unsigned long size)
         return ret;
 }

+static ssize_t tracing_resize_ring_buffer(unsigned long size)
+{
+        int cpu, ret = size;
+
+        mutex_lock(&trace_types_lock);
+
+        tracing_stop();
+
+        /* disable all cpu buffers */
+        for_each_tracing_cpu(cpu) {
+                if (global_trace.data[cpu])
+                        atomic_inc(&global_trace.data[cpu]->disabled);
+                if (max_tr.data[cpu])
+                        atomic_inc(&max_tr.data[cpu]->disabled);
+        }
+
+        if (size != global_trace.entries)
+                ret = __tracing_resize_ring_buffer(size);
+
+        if (ret < 0)
+                ret = -ENOMEM;
+
+        for_each_tracing_cpu(cpu) {
+                if (global_trace.data[cpu])
+                        atomic_dec(&global_trace.data[cpu]->disabled);
+                if (max_tr.data[cpu])
+                        atomic_dec(&max_tr.data[cpu]->disabled);
+        }
+
+        tracing_start();
+        mutex_unlock(&trace_types_lock);
+
+        return ret;
+}
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
@@ -2836,7 +2891,7 @@ int tracing_update_buffers(void)

         mutex_lock(&trace_types_lock);
         if (!ring_buffer_expanded)
-                ret = tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size);
         mutex_unlock(&trace_types_lock);

         return ret;
@@ -2860,7 +2915,7 @@ static int tracing_set_tracer(const char *buf)
         mutex_lock(&trace_types_lock);

         if (!ring_buffer_expanded) {
-                ret = tracing_resize_ring_buffer(trace_buf_size);
+                ret = __tracing_resize_ring_buffer(trace_buf_size);
                 if (ret < 0)
                         goto out;
                 ret = 0;
@@ -2966,20 +3021,11 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
         unsigned long *ptr = filp->private_data;
-        char buf[64];
         unsigned long val;
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         *ptr = val * 1000;

@@ -3434,67 +3480,54 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
 {
         unsigned long val;
-        char buf[64];
-        int ret, cpu;
+        int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         /* must have at least 1 entry */
         if (!val)
                 return -EINVAL;

-        mutex_lock(&trace_types_lock);
-
-        tracing_stop();
-
-        /* disable all cpu buffers */
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_inc(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_inc(&max_tr.data[cpu]->disabled);
-        }
-
         /* value is in KB */
         val <<= 10;

-        if (val != global_trace.entries) {
-                ret = tracing_resize_ring_buffer(val);
-                if (ret < 0) {
-                        cnt = ret;
-                        goto out;
-                }
-        }
+        ret = tracing_resize_ring_buffer(val);
+        if (ret < 0)
+                return ret;

         *ppos += cnt;

-        /* If check pages failed, return ENOMEM */
-        if (tracing_disabled)
-                cnt = -ENOMEM;
- out:
-        for_each_tracing_cpu(cpu) {
-                if (global_trace.data[cpu])
-                        atomic_dec(&global_trace.data[cpu]->disabled);
-                if (max_tr.data[cpu])
-                        atomic_dec(&max_tr.data[cpu]->disabled);
-        }
-
-        tracing_start();
-        mutex_unlock(&trace_types_lock);
-
         return cnt;
 }

+static ssize_t
+tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
+                          size_t cnt, loff_t *ppos)
+{
+        /*
+         * There is no need to read what the user has written, this function
+         * is just to make sure that there is no error when "echo" is used
+         */
+
+        *ppos += cnt;
+
+        return cnt;
+}
+
+static int
+tracing_free_buffer_release(struct inode *inode, struct file *filp)
+{
+        /* disable tracing ? */
+        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
+                tracing_off();
+        /* resize the ring buffer to 0 */
+        tracing_resize_ring_buffer(0);
+
+        return 0;
+}
+
 static int mark_printk(const char *fmt, ...)
 {
         int ret;
@@ -3640,6 +3673,11 @@ static const struct file_operations tracing_entries_fops = {
         .llseek = generic_file_llseek,
 };

+static const struct file_operations tracing_free_buffer_fops = {
+        .write = tracing_free_buffer_write,
+        .release = tracing_free_buffer_release,
+};
+
 static const struct file_operations tracing_mark_fops = {
         .open = tracing_open_generic,
         .write = tracing_mark_write,

@@ -3696,7 +3734,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                 return 0;

         if (!info->spare)
-                info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+                info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
         if (!info->spare)
                 return -ENOMEM;

@@ -3853,7 +3891,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,

                 ref->ref = 1;
                 ref->buffer = info->tr->buffer;
-                ref->page = ring_buffer_alloc_read_page(ref->buffer);
+                ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
                 if (!ref->page) {
                         kfree(ref);
                         break;
@@ -3862,8 +3900,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 r = ring_buffer_read_page(ref->buffer, &ref->page,
                                           len, info->cpu, 1);
                 if (r < 0) {
-                        ring_buffer_free_read_page(ref->buffer,
-                                                   ref->page);
+                        ring_buffer_free_read_page(ref->buffer, ref->page);
                         kfree(ref);
                         break;
                 }
@@ -4099,19 +4136,10 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 {
         struct trace_option_dentry *topt = filp->private_data;
         unsigned long val;
-        char buf[64];
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         if (val != 0 && val != 1)

@@ -4159,20 +4187,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
                          loff_t *ppos)
 {
         long index = (long)filp->private_data;
-        char buf[64];
         unsigned long val;
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         if (val != 0 && val != 1)
@@ -4365,6 +4384,9 @@ static __init int tracer_init_debugfs(void)
         trace_create_file("buffer_size_kb", 0644, d_tracer,
                           &global_trace, &tracing_entries_fops);

+        trace_create_file("free_buffer", 0644, d_tracer,
+                          &global_trace, &tracing_free_buffer_fops);
+
         trace_create_file("trace_marker", 0220, d_tracer,
                           NULL, &tracing_mark_fops);

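Aside (not part of the diff): the first trace.c hunk above drops the racy runqueue_is_locked() check and instead defers the reader wakeup to a delayed work item, which always runs in a context where wake_up() is safe. A hedged sketch of the same pattern with illustrative names:

        static DECLARE_WAIT_QUEUE_HEAD(example_wait);

        static void example_wakeup_fn(struct work_struct *work)
        {
                wake_up(&example_wait);         /* runs from the workqueue */
        }

        static DECLARE_DELAYED_WORK(example_work, example_wakeup_fn);

        static void example_poke(void)
        {
                /* ~2 ms delay, mirroring trace_wake_up() above */
                schedule_delayed_work(&example_work, msecs_to_jiffies(2));
        }
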
@@ -389,6 +389,9 @@ void update_max_tr_single(struct trace_array *tr,
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                         int skip, int pc);

+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+                             int skip, int pc, struct pt_regs *regs);
+
 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
                             int pc);

@@ -400,6 +403,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer,
 {
 }

+static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
+                                           unsigned long flags, int skip,
+                                           int pc, struct pt_regs *regs)
+{
+}
+
 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
                                           unsigned long flags, int pc)
 {
@@ -609,6 +618,7 @@ enum trace_iterator_flags {
         TRACE_ITER_GRAPH_TIME = 0x80000,
         TRACE_ITER_RECORD_CMD = 0x100000,
         TRACE_ITER_OVERWRITE = 0x200000,
+        TRACE_ITER_STOP_ON_FREE = 0x400000,
 };

 /*

@@ -486,20 +486,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
 {
         struct ftrace_event_call *call = filp->private_data;
-        char buf[64];
         unsigned long val;
         int ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         ret = tracing_update_buffers();
@@ -571,19 +562,10 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 {
         const char *system = filp->private_data;
         unsigned long val;
-        char buf[64];
         ssize_t ret;

-        if (cnt >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
-
-        buf[cnt] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+        if (ret)
                 return ret;

         ret = tracing_update_buffers();

@@ -74,6 +74,20 @@ static struct tracer_flags tracer_flags = {

 static struct trace_array *graph_array;

+/*
+ * DURATION column is being also used to display IRQ signs,
+ * following values are used by print_graph_irq and others
+ * to fill in space into DURATION column.
+ */
+enum {
+        DURATION_FILL_FULL = -1,
+        DURATION_FILL_START = -2,
+        DURATION_FILL_END = -3,
+};
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+                     u32 flags);
+
 /* Add a function return address to the trace stack on thread info.*/
 int
@@ -577,32 +591,6 @@ get_return_for_leaf(struct trace_iterator *iter,
         return next;
 }

-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s,
-                     u32 flags)
-{
-        /* If duration disappear, we don't need anything */
-        if (!(flags & TRACE_GRAPH_PRINT_DURATION))
-                return 1;
-
-        /* Non nested entry or return */
-        if (duration == -1)
-                return trace_seq_printf(s, " ");
-
-        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
-                /* Duration exceeded 100 msecs */
-                if (duration > 100000ULL)
-                        return trace_seq_printf(s, "! ");
-
-                /* Duration exceeded 10 msecs */
-                if (duration > 10000ULL)
-                        return trace_seq_printf(s, "+ ");
-        }
-
-        return trace_seq_printf(s, " ");
-}
-
 static int print_graph_abs_time(u64 t, struct trace_seq *s)
 {
         unsigned long usecs_rem;

@@ -625,34 +613,36 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
             addr >= (unsigned long)__irqentry_text_end)
                 return TRACE_TYPE_UNHANDLED;

-        /* Absolute time */
-        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
-                ret = print_graph_abs_time(iter->ts, s);
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+                /* Absolute time */
+                if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
+                        ret = print_graph_abs_time(iter->ts, s);
+                        if (!ret)
+                                return TRACE_TYPE_PARTIAL_LINE;
+                }

-        /* Cpu */
-        if (flags & TRACE_GRAPH_PRINT_CPU) {
-                ret = print_graph_cpu(s, cpu);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+                /* Cpu */
+                if (flags & TRACE_GRAPH_PRINT_CPU) {
+                        ret = print_graph_cpu(s, cpu);
+                        if (ret == TRACE_TYPE_PARTIAL_LINE)
+                                return TRACE_TYPE_PARTIAL_LINE;
+                }

-        /* Proc */
-        if (flags & TRACE_GRAPH_PRINT_PROC) {
-                ret = print_graph_proc(s, pid);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-                ret = trace_seq_printf(s, " | ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
+                /* Proc */
+                if (flags & TRACE_GRAPH_PRINT_PROC) {
+                        ret = print_graph_proc(s, pid);
+                        if (ret == TRACE_TYPE_PARTIAL_LINE)
+                                return TRACE_TYPE_PARTIAL_LINE;
+                        ret = trace_seq_printf(s, " | ");
+                        if (!ret)
+                                return TRACE_TYPE_PARTIAL_LINE;
+                }
         }

-        /* No overhead */
-        ret = print_graph_overhead(-1, s, flags);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
+        ret = print_graph_duration(DURATION_FILL_START, s, flags);
+        if (ret != TRACE_TYPE_HANDLED)
+                return ret;

         if (type == TRACE_GRAPH_ENT)
                 ret = trace_seq_printf(s, "==========>");
@@ -662,9 +652,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;

-        /* Don't close the duration column if haven't one */
-        if (flags & TRACE_GRAPH_PRINT_DURATION)
-                trace_seq_printf(s, " |");
+        ret = print_graph_duration(DURATION_FILL_END, s, flags);
+        if (ret != TRACE_TYPE_HANDLED)
+                return ret;
+
         ret = trace_seq_printf(s, "\n");

         if (!ret)

@@ -716,9 +707,49 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 }

 static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+                     u32 flags)
 {
-        int ret;
+        int ret = -1;
+
+        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
+            !(trace_flags & TRACE_ITER_CONTEXT_INFO))
+                return TRACE_TYPE_HANDLED;
+
+        /* No real adata, just filling the column with spaces */
+        switch (duration) {
+        case DURATION_FILL_FULL:
+                ret = trace_seq_printf(s, " | ");
+                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+        case DURATION_FILL_START:
+                ret = trace_seq_printf(s, " ");
+                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+        case DURATION_FILL_END:
+                ret = trace_seq_printf(s, " |");
+                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+        }
+
+        /* Signal a overhead of time execution to the output */
+        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
+                /* Duration exceeded 100 msecs */
+                if (duration > 100000ULL)
+                        ret = trace_seq_printf(s, "! ");
+                /* Duration exceeded 10 msecs */
+                else if (duration > 10000ULL)
+                        ret = trace_seq_printf(s, "+ ");
+        }
+
+        /*
+         * The -1 means we either did not exceed the duration tresholds
+         * or we dont want to print out the overhead. Either way we need
+         * to fill out the space.
+         */
+        if (ret == -1)
+                ret = trace_seq_printf(s, " ");
+
+        /* Catching here any failure happenned above */
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
         ret = trace_print_graph_duration(duration, s);
         if (ret != TRACE_TYPE_HANDLED)
@@ -767,18 +798,11 @@ print_graph_entry_leaf(struct trace_iterator *iter,
                         cpu_data->enter_funcs[call->depth] = 0;
         }

-        /* Overhead */
-        ret = print_graph_overhead(duration, s, flags);
-        if (!ret)
+        /* Overhead and duration */
+        ret = print_graph_duration(duration, s, flags);
+        if (ret == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;

-        /* Duration */
-        if (flags & TRACE_GRAPH_PRINT_DURATION) {
-                ret = print_graph_duration(duration, s);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
-
         /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");

@@ -815,17 +839,10 @@ print_graph_entry_nested(struct trace_iterator *iter,
                         cpu_data->enter_funcs[call->depth] = call->func;
         }

-        /* No overhead */
-        ret = print_graph_overhead(-1, s, flags);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        /* No time */
-        if (flags & TRACE_GRAPH_PRINT_DURATION) {
-                ret = trace_seq_printf(s, " | ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+        if (ret != TRACE_TYPE_HANDLED)
+                return ret;

         /* Function */
         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -865,6 +882,9 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                         return TRACE_TYPE_PARTIAL_LINE;
         }

+        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+                return 0;
+
         /* Absolute time */
         if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                 ret = print_graph_abs_time(iter->ts, s);
@@ -1078,18 +1098,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
         if (print_graph_prologue(iter, s, 0, 0, flags))
                 return TRACE_TYPE_PARTIAL_LINE;

-        /* Overhead */
-        ret = print_graph_overhead(duration, s, flags);
-        if (!ret)
+        /* Overhead and duration */
+        ret = print_graph_duration(duration, s, flags);
+        if (ret == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;

-        /* Duration */
-        if (flags & TRACE_GRAPH_PRINT_DURATION) {
-                ret = print_graph_duration(duration, s);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
-
         /* Closing brace */
         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                 ret = trace_seq_printf(s, " ");

@@ -1146,17 +1159,10 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
         if (print_graph_prologue(iter, s, 0, 0, flags))
                 return TRACE_TYPE_PARTIAL_LINE;

-        /* No overhead */
-        ret = print_graph_overhead(-1, s, flags);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        /* No time */
-        if (flags & TRACE_GRAPH_PRINT_DURATION) {
-                ret = trace_seq_printf(s, " | ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+        if (ret != TRACE_TYPE_HANDLED)
+                return ret;

         /* Indentation */
         if (depth > 0)
@@ -1207,7 +1213,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,


 enum print_line_t
-__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
         struct ftrace_graph_ent_entry *field;
         struct fgraph_data *data = iter->private;
@@ -1270,18 +1276,7 @@ __print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 static enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-        return __print_graph_function_flags(iter, tracer_flags.val);
-}
-
-enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
-                                             u32 flags)
-{
-        if (trace_flags & TRACE_ITER_LATENCY_FMT)
-                flags |= TRACE_GRAPH_PRINT_DURATION;
-        else
-                flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
-        return __print_graph_function_flags(iter, flags);
+        return print_graph_function_flags(iter, tracer_flags.val);
 }

 static enum print_line_t

@@ -1309,8 +1304,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
         seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
         seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
         seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
-        seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces);
-        seq_printf(s, "#%.*s|||| / \n", size, spaces);
+        seq_printf(s, "#%.*s||| / \n", size, spaces);
 }

 static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
@@ -1329,7 +1323,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
         if (flags & TRACE_GRAPH_PRINT_PROC)
                 seq_printf(s, " TASK/PID ");
         if (lat)
-                seq_printf(s, "|||||");
+                seq_printf(s, "||||");
         if (flags & TRACE_GRAPH_PRINT_DURATION)
                 seq_printf(s, " DURATION ");
         seq_printf(s, " FUNCTION CALLS\n");
@@ -1343,7 +1337,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
         if (flags & TRACE_GRAPH_PRINT_PROC)
                 seq_printf(s, " | | ");
         if (lat)
-                seq_printf(s, "|||||");
+                seq_printf(s, "||||");
         if (flags & TRACE_GRAPH_PRINT_DURATION)
                 seq_printf(s, " | | ");
         seq_printf(s, " | | | |\n");
@@ -1358,15 +1352,16 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
         struct trace_iterator *iter = s->private;

+        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+                return;
+
         if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                 /* print nothing if the buffers are empty */
                 if (trace_empty(iter))
                         return;

                 print_trace_header(s, iter);
-        }
+
+                flags |= TRACE_GRAPH_PRINT_DURATION;
+        } else
+                flags |= TRACE_GRAPH_PRINT_ABS_TIME;

         __print_graph_headers_flags(s, flags);
 }

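Aside (not part of the diff): print_graph_duration() above overloads its unsigned long long argument with the negative DURATION_FILL_* sentinels; converted to u64 they become values no real duration can reach, so the switch matches only explicit fill requests and real durations fall through to normal printing. A hedged sketch of the same trick in isolation (illustrative names):

        enum { FILL_FULL = -1, FILL_START = -2, FILL_END = -3 };

        static const char *column_fill(unsigned long long duration)
        {
                switch (duration) {             /* sentinels wrap to huge u64 values */
                case FILL_FULL:  return "   |  ";
                case FILL_START: return "  ";
                case FILL_END:   return " |";
                }
                return NULL;                    /* a real duration, print it normally */
        }
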
@@ -226,7 +226,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
 }

 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
-                            TRACE_GRAPH_PRINT_PROC)
+                            TRACE_GRAPH_PRINT_PROC | \
+                            TRACE_GRAPH_PRINT_ABS_TIME | \
+                            TRACE_GRAPH_PRINT_DURATION)

 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {

@@ -1397,7 +1397,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
         store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

         if (!filter_current_check_discard(buffer, call, entry, event))
-                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+                trace_nowake_buffer_unlock_commit_regs(buffer, event,
+                                                       irq_flags, pc, regs);
 }

 /* Kretprobe handler */
@@ -1429,7 +1430,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
         store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

         if (!filter_current_check_discard(buffer, call, entry, event))
-                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+                trace_nowake_buffer_unlock_commit_regs(buffer, event,
+                                                       irq_flags, pc, regs);
 }

 /* Event entry printers */

@@ -227,7 +227,9 @@ static void wakeup_trace_close(struct trace_iterator *iter)
         graph_trace_close(iter);
 }

-#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
+                            TRACE_GRAPH_PRINT_ABS_TIME | \
+                            TRACE_GRAPH_PRINT_DURATION)

 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {

@@ -156,20 +156,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 {
         long *ptr = filp->private_data;
         unsigned long val, flags;
-        char buf[64];
         int ret;
         int cpu;

-        if (count >= sizeof(buf))
-                return -EINVAL;
-
-        if (copy_from_user(&buf, ubuf, count))
-                return -EFAULT;
-
-        buf[count] = 0;
-
-        ret = strict_strtoul(buf, 10, &val);
-        if (ret < 0)
+        ret = kstrtoul_from_user(ubuf, count, 10, &val);
+        if (ret)
                 return ret;

         local_irq_save(flags);