[PATCH] kprobes: fix bug when probed on task and isr functions
This patch fixes a race condition in which the system would hang, or sometimes crash, within minutes when kprobes were inserted on an ISR routine and on a task routine. The fix has been stress tested on i386, ia64, ppc64 and x86_64. To reproduce the problem, insert kprobes on schedule() and do_IRQ(); the system should hang or crash.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Acked-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit deac66ae45 (parent bce0649417)
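For readers who want to reproduce the original failure, the sketch below registers kprobes on schedule() and do_IRQ() as the commit message suggests. It is a minimal illustration, not part of this patch: it uses the present-day register_kprobe()/.symbol_name interface rather than the 2.6.12-era API being patched here, uses empty handlers, and assumes the running kernel still has a do_IRQ symbol that kprobes may attach to.

#include <linux/module.h>
#include <linux/kprobes.h>

/* Empty pre-handler: merely hitting the probes is enough to exercise the
 * kprobe_handler() re-entrancy paths touched by this patch. */
static int dummy_pre(struct kprobe *kp, struct pt_regs *regs)
{
	return 0;
}

static struct kprobe kp_sched = { .symbol_name = "schedule", .pre_handler = dummy_pre };
static struct kprobe kp_irq   = { .symbol_name = "do_IRQ",   .pre_handler = dummy_pre };

static int __init repro_init(void)
{
	int ret;

	ret = register_kprobe(&kp_sched);
	if (ret)
		return ret;
	ret = register_kprobe(&kp_irq);
	if (ret)
		unregister_kprobe(&kp_sched);
	return ret;
}

static void __exit repro_exit(void)
{
	unregister_kprobe(&kp_irq);
	unregister_kprobe(&kp_sched);
}

module_init(repro_init);
module_exit(repro_exit);
MODULE_LICENSE("GPL");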
@@ -177,7 +177,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			if (kprobe_status == KPROBE_HIT_SS &&
+				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 				regs->eflags &= ~TF_MASK;
 				regs->eflags |= kprobe_saved_eflags;
 				unlock_kprobes();
@@ -95,6 +95,17 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 	p->ainsn.inst_flag = 0;
 	p->ainsn.target_br_reg = 0;
 
+	/* Check for Break instruction
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
+		/* is a break instruction */
+		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
+		return;
+	}
+
 	if (bundle_encoding[template][slot] == B) {
 		switch (major_opcode) {
 		case INDIRECT_CALL_OPCODE:
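The IA-64 hunk above decides whether the probed slot is a break instruction by testing the major opcode together with a single 9-bit field. As a rough user-space sketch (assuming the slot layout described in the patch comment: major opcode in bits 37:40, x3 in bits 33:35, x6 in bits 27:32), the predicate reduces to the helper below; the constants and sample values are illustrative only, not real IA-64 encodings.

#include <stdio.h>
#include <stdint.h>

/* A break instruction has the major opcode and the x3/x6 sub-opcode
 * fields all zero, so one 9-bit mask over bits 27..35 plus the separate
 * major_opcode argument covers the whole check. */
static int is_break_slot(uint64_t kprobe_inst, uint64_t major_opcode)
{
	return !major_opcode && !((kprobe_inst >> 27) & 0x1FF);
}

int main(void)
{
	uint64_t break_like = 0x1234ULL << 6;	/* immediate bits only, sub-opcode fields clear */
	uint64_t other      = 1ULL << 30;	/* a bit set inside the x6 field */

	printf("%d %d\n",
	       is_break_slot(break_like, 0),	/* 1: treated as a break */
	       is_break_slot(other, 0));	/* 0: not a break */
	return 0;
}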
@@ -542,8 +553,11 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
 	unsigned long slot = (unsigned long)p->addr & 0xf;
 
-	/* Update instruction pointer (IIP) and slot number (IPSR.ri) */
-	regs->cr_iip = bundle_addr & ~0xFULL;
+	/* single step inline if break instruction */
+	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
+		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
+	else
+		regs->cr_iip = bundle_addr & ~0xFULL;
 
 	if (slot > 2)
 		slot = 0;
@@ -599,7 +613,9 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	if (kprobe_running()) {
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			if ( (kprobe_status == KPROBE_HIT_SS) &&
+			     (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
+				ia64_psr(regs)->ss = 0;
 				unlock_kprobes();
 				goto no_kprobe;
 			}
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->msr |= MSR_SE;
 
 	/* single step inline if it is a trap variant */
-	if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
+	if (is_trap(insn))
 		regs->nip = (unsigned long)p->addr;
 	else
 		regs->nip = (unsigned long)p->ainsn.insn;
@@ -152,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			kprobe_opcode_t insn = *p->ainsn.insn;
+			if (kprobe_status == KPROBE_HIT_SS &&
+				is_trap(insn)) {
 				regs->msr &= ~MSR_SE;
 				regs->msr |= kprobe_saved_msr;
 				unlock_kprobes();
@@ -192,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		 * trap variant, it could belong to someone else
 		 */
 		kprobe_opcode_t cur_insn = *addr;
-		if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
-				IS_TWI(cur_insn) || IS_TDI(cur_insn))
+		if (is_trap(cur_insn))
 			goto no_kprobe;
 		/*
 		 * The breakpoint instruction was removed right
@@ -403,7 +404,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	default:
 		break;
 	}
-	preempt_enable();
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -311,7 +311,8 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
 		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			if (kprobe_status == KPROBE_HIT_SS &&
+				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
 				regs->eflags &= ~TF_MASK;
 				regs->eflags |= kprobe_saved_rflags;
 				unlock_kprobes();
@@ -92,6 +92,7 @@ struct arch_specific_insn {
 	kprobe_opcode_t insn;
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
 #define INST_FLAG_FIX_BRANCH_REG 2
+#define INST_FLAG_BREAK_INST 4
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
 };
@@ -42,6 +42,9 @@ typedef unsigned int kprobe_opcode_t;
 
 #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
 
+#define is_trap(instr)	(IS_TW(instr) || IS_TD(instr) || \
+			IS_TWI(instr) || IS_TDI(instr))
+
 #define ARCH_SUPPORTS_KRETPROBES
 void kretprobe_trampoline(void);
 
@@ -155,14 +155,36 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 /* Locks kprobe: irqs must be disabled */
 void __kprobes lock_kprobes(void)
 {
+	unsigned long flags = 0;
+
+	/* Avoiding local interrupts to happen right after we take the kprobe_lock
+	 * and before we get a chance to update kprobe_cpu, this to prevent
+	 * deadlock when we have a kprobe on ISR routine and a kprobe on task
+	 * routine
+	 */
+	local_irq_save(flags);
+
 	spin_lock(&kprobe_lock);
 	kprobe_cpu = smp_processor_id();
+
+	local_irq_restore(flags);
 }
 
 void __kprobes unlock_kprobes(void)
 {
+	unsigned long flags = 0;
+
+	/* Avoiding local interrupts to happen right after we update
+	 * kprobe_cpu and before we get a a chance to release kprobe_lock,
+	 * this to prevent deadlock when we have a kprobe on ISR routine and
+	 * a kprobe on task routine
+	 */
+	local_irq_save(flags);
+
 	kprobe_cpu = NR_CPUS;
 	spin_unlock(&kprobe_lock);
+
+	local_irq_restore(flags);
 }
 
 /* You have to be holding the kprobe_lock */
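The reason interrupts must be masked in the hunk above: kprobe_running() at this point in history decided re-entrancy by comparing kprobe_cpu against the current CPU. If an interrupt arrives after spin_lock(&kprobe_lock) but before kprobe_cpu is updated (or after kprobe_cpu is cleared but before spin_unlock), a probe hit in the ISR does not look like a recursive hit, so it calls lock_kprobes() again and spins on a lock its own CPU already holds. The sketch below is a generic illustration of that pattern and its fix, not the kprobes code itself; the names demo_lock, demo_task_path and demo_isr are hypothetical, and it uses the current two-argument IRQ handler signature.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* A lock shared between task context and an interrupt handler must be
 * taken with interrupts disabled on the local CPU; otherwise an IRQ
 * arriving inside the critical section deadlocks against it. */
static DEFINE_SPINLOCK(demo_lock);

static void demo_task_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* safe: IRQs off on this CPU */
	/* ... update state also touched by demo_isr() ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	spin_lock(&demo_lock);			/* cannot preempt the section above */
	/* ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}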