tracing: Protect ftrace_trace_arrays list in trace_events.c
There are multiple places where the ftrace_trace_arrays list is accessed in
trace_events.c without the trace_types_lock held.

Link: http://lkml.kernel.org/r/1372732674-22726-1-git-send-email-azl@google.com

Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Alexander Z Lam <lambchop468@gmail.com>
Cc: stable@vger.kernel.org # 3.10
Signed-off-by: Alexander Z Lam <azl@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit a82274151a
parent 2d71619c59
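The rule this patch enforces is easiest to see outside the kernel. Below is a minimal
userspace sketch (pthreads instead of kernel mutexes, a hand-rolled singly linked list
instead of list_head): every walk of ftrace_trace_arrays must hold trace_types_lock,
the same lock writers take when adding or removing trace instances. The types and
helpers here are illustrative only, not the kernel's.

/*
 * Simplified userspace analogue of the locking pattern (not kernel code).
 * Names mirror the kernel identifiers; the structures are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct trace_array {
	char name[32];
	struct trace_array *next;
};

static struct trace_array *ftrace_trace_arrays;                 /* list head */
static pthread_mutex_t trace_types_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer: add an instance; always under trace_types_lock. */
static void add_instance(const char *name)
{
	struct trace_array *tr = calloc(1, sizeof(*tr));

	strncpy(tr->name, name, sizeof(tr->name) - 1);
	pthread_mutex_lock(&trace_types_lock);
	tr->next = ftrace_trace_arrays;
	ftrace_trace_arrays = tr;
	pthread_mutex_unlock(&trace_types_lock);
}

/*
 * Reader: walk the list. Without taking trace_types_lock first, a concurrent
 * add or remove could leave us following a freed or half-linked node -- the
 * bug class this patch closes in trace_events.c.
 */
static void list_instances(void)
{
	struct trace_array *tr;

	pthread_mutex_lock(&trace_types_lock);
	for (tr = ftrace_trace_arrays; tr; tr = tr->next)
		printf("instance: %s\n", tr->name);
	pthread_mutex_unlock(&trace_types_lock);
}

int main(void)
{
	add_instance("top");
	add_instance("foo");
	list_instances();
	return 0;
}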
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -266,7 +266,7 @@ static struct tracer *trace_types __read_mostly;
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
-static DEFINE_MUTEX(trace_types_lock);
+DEFINE_MUTEX(trace_types_lock);
 
 /*
  * serialize the access of the ring buffer
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -224,6 +224,8 @@ enum {
 
 extern struct list_head ftrace_trace_arrays;
 
+extern struct mutex trace_types_lock;
+
 /*
  * The global tracer (top) should be the first trace array added,
  * but we check the flag anyway.
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1008,6 +1008,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
 	int ret;
 
 	/* Make sure the system still exists */
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		list_for_each_entry(dir, &tr->systems, list) {
@@ -1023,6 +1024,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
 	}
  exit_loop:
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 
 	if (!system)
 		return -ENODEV;
@@ -1617,6 +1619,7 @@ static void __add_event_to_tracers(struct ftrace_event_call *call,
 int trace_add_event_call(struct ftrace_event_call *call)
 {
 	int ret;
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 
 	ret = __register_event(call, NULL);
@@ -1624,11 +1627,13 @@ int trace_add_event_call(struct ftrace_event_call *call)
 		__add_event_to_tracers(call, NULL);
 
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 	return ret;
 }
 
 /*
- * Must be called under locking both of event_mutex and trace_event_sem.
+ * Must be called under locking of trace_types_lock, event_mutex and
+ * trace_event_sem.
  */
 static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
@@ -1640,11 +1645,13 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 /* Remove an event_call */
 void trace_remove_event_call(struct ftrace_event_call *call)
 {
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	down_write(&trace_event_sem);
 	__trace_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 }
 
 #define for_each_event(event, start, end) \
@@ -1788,6 +1795,7 @@ static int trace_module_notify(struct notifier_block *self,
 {
 	struct module *mod = data;
 
+	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	switch (val) {
 	case MODULE_STATE_COMING:
@@ -1798,6 +1806,7 @@ static int trace_module_notify(struct notifier_block *self,
 		break;
 	}
 	mutex_unlock(&event_mutex);
+	mutex_unlock(&trace_types_lock);
 
 	return 0;
 }