kprobes: Prevent re-registration of the same kprobe

Prevent re-registration of the same kprobe. This situation, though
unlikely, needs to be flagged since it can lead to a system crash if
it's not handled. The core change itself is small, but the helper
routine needed to be moved around a bit; hence the diffstat.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Frank Ch. Eigler <fche@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jason Baron <jbaron@redhat.com>
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <20090915051307.GB26458@in.ibm.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
commit 1f0ab40976
parent 5a0d9050db
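The diff below moves __get_valid_kprobe() above register_kprobe() and adds check_kprobe_rereg(), which register_kprobe() now calls right after resolving p->addr. For a caller, the visible effect is that a second register_kprobe() on a struct kprobe that is still registered fails with -EINVAL instead of silently proceeding (the situation the changelog notes can lead to a system crash). A minimal test-module sketch of that behaviour follows; the module name, handler, and probed symbol (do_fork) are illustrative assumptions, not part of this patch.

/*
 * Hypothetical illustration only -- not part of the patch.
 * Probing do_fork() is just an example; any kernel text symbol works.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* just observe; let the probed instruction run */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init rereg_demo_init(void)
{
	int ret;

	ret = register_kprobe(&kp);	/* first registration succeeds */
	if (ret < 0)
		return ret;
	pr_info("first register_kprobe: %d\n", ret);

	/* Same, still-registered kprobe: with this patch we get -EINVAL. */
	ret = register_kprobe(&kp);
	pr_info("second register_kprobe: %d\n", ret);

	return 0;
}

static void __exit rereg_demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(rereg_demo_init);
module_exit(rereg_demo_exit);
MODULE_LICENSE("GPL");

On a kernel without this patch the second call is not rejected, which is the re-registration case the changelog flags.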
@@ -676,6 +676,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
 	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
 }
 
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p, *list_p;
+
+	old_p = get_kprobe(p->addr);
+	if (unlikely(!old_p))
+		return NULL;
+
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid;
+		return NULL;
+	}
+valid:
+	return old_p;
+}
+
+/* Return error if the kprobe is being re-registered */
+static inline int check_kprobe_rereg(struct kprobe *p)
+{
+	int ret = 0;
+	struct kprobe *old_p;
+
+	mutex_lock(&kprobe_mutex);
+	old_p = __get_valid_kprobe(p);
+	if (old_p)
+		ret = -EINVAL;
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+
 int __kprobes register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
@@ -688,6 +722,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 		return -EINVAL;
 	p->addr = addr;
 
+	ret = check_kprobe_rereg(p);
+	if (ret)
+		return ret;
+
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr)) {
@@ -757,26 +795,6 @@ int __kprobes register_kprobe(struct kprobe *p)
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
-/* Check passed kprobe is valid and return kprobe in kprobe_table. */
-static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
-{
-	struct kprobe *old_p, *list_p;
-
-	old_p = get_kprobe(p->addr);
-	if (unlikely(!old_p))
-		return NULL;
-
-	if (p != old_p) {
-		list_for_each_entry_rcu(list_p, &old_p->list, list)
-			if (list_p == p)
-			/* kprobe p is a valid probe */
-				goto valid;
-		return NULL;
-	}
-valid:
-	return old_p;
-}
-
 /*
  * Unregister a kprobe without a scheduler synchronization.
  */