From 66ada2ccae4ed4dd07ba91df3b5fdb4c11335bd1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:01 +0900 Subject: [PATCH 01/17] kprobes: Add generic kretprobe trampoline handler Add a generic kretprobe trampoline handler for unifying the all cloned /arch/* kretprobe trampoline handlers. The generic kretprobe trampoline handler is based on the x86 implementation, because it is the latest implementation. It has frame pointer checking, kprobe_busy_begin/end and return address fixup for user handlers. [ mingo: Minor edits. ] Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870600138.1229682.3424065380448088833.stgit@devnote2 --- include/linux/kprobes.h | 32 ++++++++++++-- kernel/kprobes.c | 98 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 126 insertions(+), 4 deletions(-) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 9be1bff4f586..72142ae5df3e 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -187,10 +187,38 @@ static inline int kprobes_built_in(void) return 1; } +extern struct kprobe kprobe_busy; +extern void kprobe_busy_begin(void); +extern void kprobe_busy_end(void); + #ifdef CONFIG_KRETPROBES extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); extern int arch_trampoline_kprobe(struct kprobe *p); + +/* If the trampoline handler called from a kprobe, use this version */ +unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, + void *trampoline_address, + void *frame_pointer); + +static nokprobe_inline +unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, + void *trampoline_address, + void *frame_pointer) +{ + unsigned long ret; + /* + * Set a dummy kprobe for avoiding kretprobe recursion. + * Since kretprobe never runs in kprobe handler, no kprobe must + * be running at this point. + */ + kprobe_busy_begin(); + ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer); + kprobe_busy_end(); + + return ret; +} + #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) @@ -354,10 +382,6 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } -extern struct kprobe kprobe_busy; -void kprobe_busy_begin(void); -void kprobe_busy_end(void); - kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 287b263c9cb9..a0afaa79024e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1927,6 +1927,104 @@ unsigned long __weak arch_deref_entry_point(void *entry) } #ifdef CONFIG_KRETPROBES + +unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, + void *trampoline_address, + void *frame_pointer) +{ + struct kretprobe_instance *ri = NULL, *last = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; + unsigned long flags; + kprobe_opcode_t *correct_ret_addr = NULL; + bool skipped = false; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + + /* + * It is possible to have multiple instances associated with a given + * task either because multiple functions in the call path have + * return probes installed on them, and/or more than one + * return probe was registered for a target function. 
+ * + * We can handle this because: + * - instances are always pushed into the head of the list + * - when multiple return probes are registered for the same + * function, the (chronologically) first instance's ret_addr + * will be the real return address, and all the rest will + * point to kretprobe_trampoline. + */ + hlist_for_each_entry(ri, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + /* + * Return probes must be pushed on this hash list correct + * order (same as return order) so that it can be popped + * correctly. However, if we find it is pushed it incorrect + * order, this means we find a function which should not be + * probed, because the wrong order entry is pushed on the + * path of processing other kretprobe itself. + */ + if (ri->fp != frame_pointer) { + if (!skipped) + pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n"); + skipped = true; + continue; + } + + correct_ret_addr = ri->ret_addr; + if (skipped) + pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", + ri->rp->kp.addr); + + if (correct_ret_addr != trampoline_address) + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, (unsigned long)correct_ret_addr, + (unsigned long)trampoline_address); + last = ri; + + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + if (ri->fp != frame_pointer) + continue; + + if (ri->rp && ri->rp->handler) { + struct kprobe *prev = kprobe_running(); + + __this_cpu_write(current_kprobe, &ri->rp->kp); + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); + __this_cpu_write(current_kprobe, prev); + } + + recycle_rp_inst(ri, &empty_rp); + + if (ri == last) + break; + } + + kretprobe_hash_unlock(current, &flags); + + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + + return (unsigned long)correct_ret_addr; +} +NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) + /* * This kprobe pre_handler is registered with every kretprobe. When probe * hits it will set up the return probe. From d7641289dad95df3531f573112778c548331ab83 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:12 +0900 Subject: [PATCH 02/17] x86/kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Use regs->sp for framepointer verification. [ mingo: Minor edits. 
] Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870601250.1229682.14598707734683575237.stgit@devnote2 --- arch/x86/kernel/kprobes/core.c | 108 +-------------------------------- 1 file changed, 3 insertions(+), 105 deletions(-) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index fdadc37d72af..882b95313ad6 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -767,124 +767,22 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); + /* * Called from kretprobe_trampoline */ __used __visible void *trampoline_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - void *frame_pointer; - bool skipped = false; - - /* - * Set a dummy kprobe for avoiding kretprobe recursion. - * Since kretprobe never run in kprobe handler, kprobe must not - * be running at this point. - */ - kprobe_busy_begin(); - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); /* fixup registers */ regs->cs = __KERNEL_CS; #ifdef CONFIG_X86_32 regs->cs |= get_kernel_rpl(); regs->gs = 0; #endif - /* We use pt_regs->sp for return address holder. */ - frame_pointer = ®s->sp; - regs->ip = trampoline_address; + regs->ip = (unsigned long)&kretprobe_trampoline; regs->orig_ax = ~0UL; - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry(ri, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - /* - * Return probes must be pushed on this hash list correct - * order (same as return order) so that it can be popped - * correctly. However, if we find it is pushed it incorrect - * order, this means we find a function which should not be - * probed, because the wrong order entry is pushed on the - * path of processing other kretprobe itself. - */ - if (ri->fp != frame_pointer) { - if (!skipped) - pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n"); - skipped = true; - continue; - } - - orig_ret_address = (unsigned long)ri->ret_addr; - if (skipped) - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", - ri->rp->kp.addr); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - if (ri->fp != frame_pointer) - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kprobe_busy); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - kprobe_busy_end(); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, ®s->sp); } NOKPROBE_SYMBOL(trampoline_handler); From 94509582d1d173c4777b8832ecbac6dff2ccbfa7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:24 +0900 Subject: [PATCH 03/17] arm: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Use regs->ARM_fp for framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870602406.1229682.10496730247473708592.stgit@devnote2 --- arch/arm/probes/kprobes/core.c | 78 ++-------------------------------- 1 file changed, 3 insertions(+), 75 deletions(-) diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index feefa2055eba..a9653117ca0d 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -413,87 +413,15 @@ void __naked __kprobes kretprobe_trampoline(void) /* Called from kretprobe_trampoline */ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * a return probe installed on them, and/or more than one return - * probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, + (void *)regs->ARM_fp); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr; + ri->fp = (void *)regs->ARM_fp; /* Replace the return addr with trampoline addr. */ regs->ARM_lr = (unsigned long)&kretprobe_trampoline; From 95a4b7a24f55efd4fbc77c020fe0aab3e1f37f8a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:35 +0900 Subject: [PATCH 04/17] arm64: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler, and use kernel_stack_pointer(regs) for framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870603544.1229682.10309733593594205725.stgit@devnote2 --- arch/arm64/kernel/probes/kprobes.c | 78 ++---------------------------- 1 file changed, 3 insertions(+), 75 deletions(-) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 5290f17a4d80..deba738142ed 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -464,87 +464,15 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. 
- */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, + (void *)kernel_stack_pointer(regs)); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; + ri->fp = (void *)kernel_stack_pointer(regs); /* replace return addr (x30) with trampoline */ regs->regs[30] = (long)&kretprobe_trampoline; From f75dd136b65cccfaf3869be5380f401538fa9f72 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:46 +0900 Subject: [PATCH 05/17] arc: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870604671.1229682.13639386820521358456.stgit@devnote2 --- arch/arc/kernel/kprobes.c | 54 ++------------------------------------- 1 file changed, 2 insertions(+), 52 deletions(-) diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 7d3efe83cba7..cabef45f11df 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -388,6 +388,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, { ri->ret_addr = (kprobe_opcode_t *) regs->blink; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->blink = (unsigned long)&kretprobe_trampoline; @@ -396,58 +397,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) { - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - regs->ret = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* By returning a non zero value, we are telling the kprobe handler * that we don't want the post_handler to run From 03c8a4a447008c1a755cd32bc89af24119bd08f1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:00:58 +0900 Subject: [PATCH 06/17] csky: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Acked-by: Guo Ren Link: https://lore.kernel.org/r/159870605775.1229682.2627276871589951304.stgit@devnote2 --- arch/csky/kernel/probes/kprobes.c | 77 +------------------------------ 1 file changed, 2 insertions(+), 75 deletions(-) diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c index f0f733b7ac5a..589f090f48b9 100644 --- a/arch/csky/kernel/probes/kprobes.c +++ b/arch/csky/kernel/probes/kprobes.c @@ -404,87 +404,14 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->lr; + ri->fp = NULL; regs->lr = (unsigned long) &kretprobe_trampoline; } From e792ff804f49720ce003b3e4c618b5d996256a18 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:01:09 +0900 Subject: [PATCH 07/17] ia64: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870606883.1229682.12331813108378725668.stgit@devnote2 --- arch/ia64/kernel/kprobes.c | 77 +------------------------------------- 1 file changed, 2 insertions(+), 75 deletions(-) diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 7a7df944d798..fc1ff8a4d7de 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -396,83 +396,9 @@ static void kretprobe_trampoline(void) { } -/* - * At this point the target function has been tricked into - * returning into our trampoline. Lookup the associated instance - * and then: - * - call the handler function - * - cleanup by marking the instance as unused - * - long jump back to the original return address - */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - ((struct fnptr *)kretprobe_trampoline)->ip; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - regs->cr_iip = orig_ret_address; - - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; From 2ef12450856121f092621d4305885ec20283260e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:01:19 +0900 Subject: [PATCH 08/17] mips: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. 
Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870607968.1229682.12100697467108845587.stgit@devnote2 --- arch/mips/kernel/kprobes.c | 54 +++----------------------------------- 1 file changed, 3 insertions(+), 51 deletions(-) diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index d043c2f897fc..54dfba8fa77c 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -477,6 +477,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->regs[31] = (unsigned long)kretprobe_trampoline; @@ -488,57 +489,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - instruction_pointer(regs) = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, + kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler From 16ff6f7ac92e506a05d3720e4d9fa0647bb0b5c4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:01:37 +0900 Subject: [PATCH 09/17] parisc: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. 
Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870609708.1229682.1861714117180719169.stgit@devnote2 --- arch/parisc/kernel/kprobes.c | 76 ++---------------------------------- 1 file changed, 4 insertions(+), 72 deletions(-) diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index 77ec51818916..6d21a515eea5 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -191,80 +191,11 @@ static struct kprobe trampoline_p = { static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)trampoline_p.addr; - kprobe_opcode_t *correct_ret_addr = NULL; + unsigned long orig_ret_address; - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * a return probe installed on them, and/or more than one return - * probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + orig_ret_address = __kretprobe_trampoline_handler(regs, trampoline_p.addr, NULL); instruction_pointer_set(regs, orig_ret_address); + return 1; } @@ -272,6 +203,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->gr[2]; + ri->fp = NULL; /* Replace the return addr with trampoline addr. */ regs->gr[2] = (unsigned long)trampoline_p.addr; From b6c5a58dd89e683e66b52a57a6f022fda79a0aab Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:01:48 +0900 Subject: [PATCH 10/17] powerpc: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. 
Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870610825.1229682.2090635992093223399.stgit@devnote2 --- arch/powerpc/kernel/kprobes.c | 53 ++--------------------------------- 1 file changed, 3 insertions(+), 50 deletions(-) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 6ab9b4d037c3..01ab2163659e 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -218,6 +218,7 @@ bool arch_kprobe_on_func_entry(unsigned long offset) void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; @@ -396,50 +397,9 @@ asm(".global kretprobe_trampoline\n" */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + unsigned long orig_ret_address; + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* * We get here through one of two paths: * 1. by taking a trap -> kprobe_handler() -> here @@ -458,13 +418,6 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->nip = orig_ret_address - 4; regs->link = orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return 0; } NOKPROBE_SYMBOL(trampoline_probe_handler); From 26a24a6b43d51c6fc70bef9b9016d81e4cf75e6e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:02:04 +0900 Subject: [PATCH 11/17] s390: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. 
Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870612453.1229682.15950927742606892302.stgit@devnote2 --- arch/s390/kernel/kprobes.c | 79 +------------------------------------- 1 file changed, 2 insertions(+), 77 deletions(-) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index d2a71d872638..fc30e799bd84 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -228,6 +228,7 @@ NOKPROBE_SYMBOL(pop_kprobe); void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->gprs[14] = (unsigned long) &kretprobe_trampoline; @@ -331,83 +332,7 @@ static void __used kretprobe_trampoline_holder(void) */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address; - unsigned long trampoline_address; - kprobe_opcode_t *correct_ret_addr; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - ri = NULL; - orig_ret_address = 0; - correct_ret_addr = NULL; - trampoline_address = (unsigned long) &kretprobe_trampoline; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long) ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long) ri->ret_addr; - - if (ri->rp && ri->rp->handler) { - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - regs->psw.addr = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler From 0cf0e2fe91fade050bfda2a7240550ba6106b760 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:02:15 +0900 Subject: [PATCH 12/17] sh: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870613547.1229682.15519965962108261812.stgit@devnote2 --- arch/sh/kernel/kprobes.c | 58 +++------------------------------------- 1 file changed, 3 insertions(+), 55 deletions(-) diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 318296f48f1a..756100b01e84 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -204,6 +204,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->pr; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->pr = (unsigned long)kretprobe_trampoline; @@ -302,62 +303,9 @@ static void __used kretprobe_trampoline_holder(void) */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - regs->pc = orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - - return orig_ret_address; + return 1; } static int __kprobes post_kprobe_handler(struct pt_regs *regs) From 5e96ce8ae5b1428e6f4953be5fb1daf0a6d18426 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:02:25 +0900 Subject: [PATCH 13/17] sparc: kprobes: Use generic kretprobe trampoline handler Use the generic kretprobe trampoline handler. Don't use framepointer verification. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870614572.1229682.2273450776108579676.stgit@devnote2 --- arch/sparc/kernel/kprobes.c | 51 +++---------------------------------- 1 file changed, 3 insertions(+), 48 deletions(-) diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index dfbca2470536..217c21a6986a 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -453,6 +453,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8); + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->u_regs[UREG_RETPC] = @@ -465,58 +466,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; + unsigned long orig_ret_address = 0; - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler From e03b4a084ea6b0a18b0e874baec439e69090c168 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:02:36 +0900 Subject: [PATCH 14/17] kprobes: Remove NMI context check The in_nmi() check in pre_handler_kretprobe() is meant to avoid recursion, and blindly assumes that anything NMI is recursive. However, since commit: 9b38cc704e84 ("kretprobe: Prevent triggering kretprobe from within kprobe_flush_task") there is a better way to detect and avoid actual recursion. By setting a dummy kprobe, any actual exceptions will terminate early (by trying to handle the dummy kprobe), and recursion will not happen. Employ this to avoid the kretprobe_table_lock() recursion, replacing the over-eager in_nmi() check. Signed-off-by: Masami Hiramatsu Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/159870615628.1229682.6087311596892125907.stgit@devnote2 --- kernel/kprobes.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index a0afaa79024e..211138225fa5 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1359,7 +1359,8 @@ static void cleanup_rp_inst(struct kretprobe *rp) struct hlist_node *next; struct hlist_head *head; - /* No race here */ + /* To avoid recursive kretprobe by NMI, set kprobe busy here */ + kprobe_busy_begin(); for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { kretprobe_table_lock(hash, &flags); head = &kretprobe_inst_table[hash]; @@ -1369,6 +1370,8 @@ static void cleanup_rp_inst(struct kretprobe *rp) } kretprobe_table_unlock(hash, &flags); } + kprobe_busy_end(); + free_rp_inst(rp); } NOKPROBE_SYMBOL(cleanup_rp_inst); @@ -2035,17 +2038,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) unsigned long hash, flags = 0; struct kretprobe_instance *ri; - /* - * To avoid deadlocks, prohibit return probing in NMI contexts, - * just skip the probe and increase the (inexact) 'nmissed' - * statistical counter, so that the user is informed that - * something happened: - */ - if (unlikely(in_nmi())) { - rp->nmissed++; - return 0; - } - /* TODO: consider to only swap the RA after the last pre_handler fired */ hash = hash_ptr(current, KPROBE_HASH_BITS); raw_spin_lock_irqsave(&rp->lock, flags); From b338817807538c893540e393856b79cbbdf777ea Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 Aug 2020 22:02:47 +0900 Subject: [PATCH 15/17] kprobes: Free kretprobe_instance with RCU callback Free kretprobe_instance with RCU callback instead of directly freeing the object in the kretprobe handler context. This will make kretprobe run safer in NMI context. 
Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870616685.1229682.11978742048709542226.stgit@devnote2 --- include/linux/kprobes.h | 6 ++++-- kernel/kprobes.c | 25 ++++++------------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 72142ae5df3e..3389067d88b1 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -156,7 +156,10 @@ struct kretprobe { }; struct kretprobe_instance { - struct hlist_node hlist; + union { + struct hlist_node hlist; + struct rcu_head rcu; + }; struct kretprobe *rp; kprobe_opcode_t *ret_addr; struct task_struct *task; @@ -395,7 +398,6 @@ int register_kretprobes(struct kretprobe **rps, int num); void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); -void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 211138225fa5..0676868f1ac2 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1223,8 +1223,7 @@ void kprobes_inc_nmissed_count(struct kprobe *p) } NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); -void recycle_rp_inst(struct kretprobe_instance *ri, - struct hlist_head *head) +static void recycle_rp_inst(struct kretprobe_instance *ri) { struct kretprobe *rp = ri->rp; @@ -1236,8 +1235,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri, hlist_add_head(&ri->hlist, &rp->free_instances); raw_spin_unlock(&rp->lock); } else - /* Unregistering */ - hlist_add_head(&ri->hlist, head); + kfree_rcu(ri, rcu); } NOKPROBE_SYMBOL(recycle_rp_inst); @@ -1313,7 +1311,7 @@ void kprobe_busy_end(void) void kprobe_flush_task(struct task_struct *tk) { struct kretprobe_instance *ri; - struct hlist_head *head, empty_rp; + struct hlist_head *head; struct hlist_node *tmp; unsigned long hash, flags = 0; @@ -1323,19 +1321,14 @@ void kprobe_flush_task(struct task_struct *tk) kprobe_busy_begin(); - INIT_HLIST_HEAD(&empty_rp); hash = hash_ptr(tk, KPROBE_HASH_BITS); head = &kretprobe_inst_table[hash]; kretprobe_table_lock(hash, &flags); hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task == tk) - recycle_rp_inst(ri, &empty_rp); + recycle_rp_inst(ri); } kretprobe_table_unlock(hash, &flags); - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } kprobe_busy_end(); } @@ -1936,13 +1929,12 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, void *frame_pointer) { struct kretprobe_instance *ri = NULL, *last = NULL; - struct hlist_head *head, empty_rp; + struct hlist_head *head; struct hlist_node *tmp; unsigned long flags; kprobe_opcode_t *correct_ret_addr = NULL; bool skipped = false; - INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* @@ -2011,7 +2003,7 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, __this_cpu_write(current_kprobe, prev); } - recycle_rp_inst(ri, &empty_rp); + recycle_rp_inst(ri); if (ri == last) break; @@ -2019,11 +2011,6 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, kretprobe_hash_unlock(current, &flags); - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (unsigned long)correct_ret_addr; } NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) From 319f0ce284fff8e4f95167cb144acc905d0584c7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 29 
Aug 2020 22:03:02 +0900 Subject: [PATCH 16/17] kprobes: Make local functions static Since we unified the kretprobe trampoline handler from arch/* code, some functions and objects do not need to be exported anymore. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/159870618256.1229682.8692046612635810882.stgit@devnote2 --- include/linux/kprobes.h | 15 --------------- kernel/kprobes.c | 9 ++++----- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 3389067d88b1..5c8c271fa1e9 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -190,7 +190,6 @@ static inline int kprobes_built_in(void) return 1; } -extern struct kprobe kprobe_busy; extern void kprobe_busy_begin(void); extern void kprobe_busy_end(void); @@ -235,16 +234,6 @@ static inline int arch_trampoline_kprobe(struct kprobe *p) extern struct kretprobe_blackpoint kretprobe_blacklist[]; -static inline void kretprobe_assert(struct kretprobe_instance *ri, - unsigned long orig_ret_address, unsigned long trampoline_address) -{ - if (!orig_ret_address || (orig_ret_address == trampoline_address)) { - printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", - ri->rp, ri->rp->kp.addr); - BUG(); - } -} - #ifdef CONFIG_KPROBES_SANITY_TEST extern int init_test_probes(void); #else @@ -364,10 +353,6 @@ int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); -void kretprobe_hash_lock(struct task_struct *tsk, - struct hlist_head **head, unsigned long *flags); -void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); -struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ static inline struct kprobe *kprobe_running(void) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 0676868f1ac2..732a70163584 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1239,7 +1239,7 @@ static void recycle_rp_inst(struct kretprobe_instance *ri) } NOKPROBE_SYMBOL(recycle_rp_inst); -void kretprobe_hash_lock(struct task_struct *tsk, +static void kretprobe_hash_lock(struct task_struct *tsk, struct hlist_head **head, unsigned long *flags) __acquires(hlist_lock) { @@ -1261,7 +1261,7 @@ __acquires(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_lock); -void kretprobe_hash_unlock(struct task_struct *tsk, +static void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) __releases(hlist_lock) { @@ -1282,7 +1282,7 @@ __releases(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_unlock); -struct kprobe kprobe_busy = { +static struct kprobe kprobe_busy = { .addr = (void *) get_kprobe, }; @@ -1983,8 +1983,7 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, break; } - kretprobe_assert(ri, (unsigned long)correct_ret_addr, - (unsigned long)trampoline_address); + BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address)); last = ri; hlist_for_each_entry_safe(ri, tmp, head, hlist) { From bcb53209be5cb32d485507452edda19b78f31d84 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 1 Sep 2020 00:12:07 +0900 Subject: [PATCH 17/17] kprobes: Fix to check probe enabled before disarm_kprobe_ftrace() Commit: 0cb2f1372baa ("kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler") fixed one bug but the underlying bugs are not completely fixed yet. 
If we run a kprobe_module.tc of ftracetest, a warning triggers: # ./ftracetest test.d/kprobe/kprobe_module.tc === Ftrace unit tests === [1] Kprobe dynamic event - probing module ... ------------[ cut here ]------------ Failed to disarm kprobe-ftrace at trace_printk_irq_work+0x0/0x7e [trace_printk] (-2) WARNING: CPU: 7 PID: 200 at kernel/kprobes.c:1091 __disarm_kprobe_ftrace.isra.0+0x7e/0xa0 This is because the kill_kprobe() calls disarm_kprobe_ftrace() even if the given probe is not enabled. In that case, ftrace_set_filter_ip() fails because the given probe point is not registered to ftrace. Fix to check the given (going) probe is enabled before invoking disarm_kprobe_ftrace(). Fixes: 0cb2f1372baa ("kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler") Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/159888672694.1411785.5987998076694782591.stgit@devnote2 --- kernel/kprobes.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 732a70163584..3b61ae8ff5da 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2235,9 +2235,10 @@ static void kill_kprobe(struct kprobe *p) /* * The module is going away. We should disarm the kprobe which - * is using ftrace. + * is using ftrace, because ftrace framework is still available at + * MODULE_STATE_GOING notification. */ - if (kprobe_ftrace(p)) + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) disarm_kprobe_ftrace(p); }
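
For reference, the unified __kretprobe_trampoline_handler() introduced in patch 01 is what ultimately invokes the user-supplied return handler of any registered kretprobe, regardless of architecture. Below is a minimal consumer sketch in the style of samples/kprobes/kretprobe_example.c; it is not part of the series above, and the probed symbol ("kernel_clone"), module name and messages are illustrative assumptions that may need adjusting for a given kernel.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Called from the generic trampoline handler once the probed function returns. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
		(long)regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.maxactive	= 20,			/* allow up to 20 concurrent instances */
	.kp.symbol_name	= "kernel_clone",	/* assumed probe target; adjust as needed */
};

static int __init kretprobe_sketch_init(void)
{
	int ret = register_kretprobe(&my_kretprobe);

	if (ret < 0)
		pr_err("register_kretprobe failed: %d\n", ret);
	return ret;
}

static void __exit kretprobe_sketch_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kretprobe_sketch_init);
module_exit(kretprobe_sketch_exit);
MODULE_LICENSE("GPL");

With such a module loaded, the per-arch arch_prepare_kretprobe() (extended above to also record ri->fp) saves the real return address and diverts the return to kretprobe_trampoline, and __kretprobe_trampoline_handler() later runs ret_handler() with the register state at function return before restoring the original return address.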