forked from luck/tmp_suning_uos_patched
tracing: kprobes: Prohibit probing on notrace function
Prohibit kprobe-events probing on notrace functions, since probing on a notrace function can cause a recursive event call. In most cases those are just skipped, but in some cases it falls into an infinite recursive call. This protection can be disabled by the kconfig CONFIG_KPROBE_EVENTS_ON_NOTRACE=y, but it is highly recommended to keep it "n" for normal kernel builds. Note that this is only available if "kprobes on ftrace" has been implemented on the target arch and CONFIG_KPROBES_ON_FTRACE=y. Link: http://lkml.kernel.org/r/153294601436.32740.10557881188933661239.stgit@devbox Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Tested-by: Francis Deslauriers <francis.deslauriers@efficios.com> [ Slight grammar and spelling fixes ] Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
518eeca05c
commit
45408c4f92
|
@ -456,6 +456,26 @@ config KPROBE_EVENTS
|
||||||
This option is also required by perf-probe subcommand of perf tools.
|
This option is also required by perf-probe subcommand of perf tools.
|
||||||
If you want to use perf tools, this option is strongly recommended.
|
If you want to use perf tools, this option is strongly recommended.
|
||||||
|
|
||||||
|
config KPROBE_EVENTS_ON_NOTRACE
	bool "Do NOT protect notrace function from kprobe events"
	depends on KPROBE_EVENTS
	depends on KPROBES_ON_FTRACE
	default n
	help
	  This is only for the developers who want to debug ftrace itself
	  using kprobe events.

	  If kprobes can use ftrace instead of breakpoint, ftrace related
	  functions are protected from kprobe-events to prevent an infinite
	  recursion or any unexpected execution path which leads to a kernel
	  crash.

	  This option disables such protection and allows you to put kprobe
	  events on ftrace functions for debugging ftrace by itself.
	  Note that this might let you shoot yourself in the foot.

	  If unsure, say N.
|
||||||
|
|
||||||
config UPROBE_EVENTS
|
config UPROBE_EVENTS
|
||||||
bool "Enable uprobes-based dynamic events"
|
bool "Enable uprobes-based dynamic events"
|
||||||
depends on ARCH_SUPPORTS_UPROBES
|
depends on ARCH_SUPPORTS_UPROBES
|
||||||
|
|
|
@ -87,6 +87,21 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
|
||||||
return nhit;
|
return nhit;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static nokprobe_inline
|
||||||
|
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
|
||||||
|
{
|
||||||
|
unsigned long addr;
|
||||||
|
|
||||||
|
if (tk->symbol) {
|
||||||
|
addr = (unsigned long)
|
||||||
|
kallsyms_lookup_name(trace_kprobe_symbol(tk));
|
||||||
|
addr += tk->rp.kp.offset;
|
||||||
|
} else {
|
||||||
|
addr = (unsigned long)tk->rp.kp.addr;
|
||||||
|
}
|
||||||
|
return addr;
|
||||||
|
}
|
||||||
|
|
||||||
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
|
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
|
||||||
{
|
{
|
||||||
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
|
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
|
||||||
|
@ -99,16 +114,8 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
|
||||||
bool trace_kprobe_error_injectable(struct trace_event_call *call)
|
bool trace_kprobe_error_injectable(struct trace_event_call *call)
|
||||||
{
|
{
|
||||||
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
|
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
|
||||||
unsigned long addr;
|
|
||||||
|
|
||||||
if (tk->symbol) {
|
return within_error_injection_list(trace_kprobe_address(tk));
|
||||||
addr = (unsigned long)
|
|
||||||
kallsyms_lookup_name(trace_kprobe_symbol(tk));
|
|
||||||
addr += tk->rp.kp.offset;
|
|
||||||
} else {
|
|
||||||
addr = (unsigned long)tk->rp.kp.addr;
|
|
||||||
}
|
|
||||||
return within_error_injection_list(addr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int register_kprobe_event(struct trace_kprobe *tk);
|
static int register_kprobe_event(struct trace_kprobe *tk);
|
||||||
|
@ -504,6 +511,22 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Return true if @tk probes inside a notrace function: a symbol whose
 * text range contains no ftrace location.  An address that kallsyms
 * cannot resolve is also treated as notrace, so probing it is refused.
 * Only meaningful when kprobes-on-ftrace is available and the
 * KPROBE_EVENTS_ON_NOTRACE override is not set; otherwise this check
 * compiles away to false (no restriction).
 */
#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!kallsyms_lookup_size_offset(addr, &size, &offset))
		return true;	/* Out of range. */

	return !ftrace_location_range(addr - offset, addr - offset + size);
}
#else
#define within_notrace_func(tk)	(false)
#endif
|
||||||
|
|
||||||
/* Internal register function - just handle k*probes and flags */
|
/* Internal register function - just handle k*probes and flags */
|
||||||
static int __register_trace_kprobe(struct trace_kprobe *tk)
|
static int __register_trace_kprobe(struct trace_kprobe *tk)
|
||||||
{
|
{
|
||||||
|
@ -512,6 +535,12 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
|
||||||
if (trace_probe_is_registered(&tk->tp))
|
if (trace_probe_is_registered(&tk->tp))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (within_notrace_func(tk)) {
|
||||||
|
pr_warn("Could not probe notrace function %s\n",
|
||||||
|
trace_kprobe_symbol(tk));
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < tk->tp.nr_args; i++)
|
for (i = 0; i < tk->tp.nr_args; i++)
|
||||||
traceprobe_update_arg(&tk->tp.args[i]);
|
traceprobe_update_arg(&tk->tp.args[i]);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue
Block a user