kernel_optimize_test/arch/powerpc/oprofile/backtrace.c
Jiang Lu 0de3b56b13 powerpc/oprofile: Disable pagefaults during user stack read
A page fault taken while reading the user stack in the oprofile backtrace
code leads to the following call trace:

WARNING: at linux/kernel/smp.c:210
Modules linked in:
CPU: 5 PID: 736 Comm: sh Tainted: G W 3.14.23-WR7.0.0.0_standard #1
task: c0000000f6208bc0 ti: c00000007c72c000 task.ti: c00000007c72c000
NIP: c0000000000ed6e4 LR: c0000000000ed5b8 CTR: 0000000000000000
REGS: c00000007c72f050 TRAP: 0700 Tainted: G W (3.14.23-WR7.0.0
tandard)
MSR: 0000000080021000 <CE,ME> CR: 48222482 XER: 00000000
SOFTE: 0
GPR00: c0000000000ed5b8 c00000007c72f2d0 c0000000010aa048 0000000000000005
GPR04: c000000000fdb820 c00000007c72f410 0000000000000001 0000000000000005
GPR08: c0000000010b5768 c000000000f8a048 0000000000000001 0000000000000000
GPR12: 0000000048222482 c00000000fffe580 0000000022222222 0000000010129664
GPR16: 0000000010143cc0 0000000000000000 0000000044444444 0000000000000000
GPR20: c00000007c7221d8 c0000000f638e3c8 000003f15a20120d 0000000000000001
GPR24: 000000005a20120d c00000007c722000 c00000007cdedda8 00003fffef23b160
GPR28: 0000000000000001 c00000007c72f410 c000000000fdb820 0000000000000006
NIP [c0000000000ed6e4] .smp_call_function_single+0x18c/0x248
LR [c0000000000ed5b8] .smp_call_function_single+0x60/0x248
Call Trace:
[c00000007c72f2d0] [c0000000000ed5b8] .smp_call_function_single+0x60/0x248 (unreliable)
[c00000007c72f3a0] [c000000000030810] .__flush_tlb_page+0x164/0x1b0
[c00000007c72f460] [c00000000002e054] .ptep_set_access_flags+0xb8/0x168
[c00000007c72f500] [c0000000001ad3d8] .handle_mm_fault+0x4a8/0xbac
[c00000007c72f5e0] [c000000000bb3238] .do_page_fault+0x3b8/0x868
[c00000007c72f810] [c00000000001e1d0] storage_fault_common+0x20/0x44
 Exception: 301 at .__copy_tofrom_user_base+0x54/0x5b0
    LR = .op_powerpc_backtrace+0x190/0x20c
[c00000007c72fb00] [c000000000a2ec34] .op_powerpc_backtrace+0x204/0x20c (unreliable)
[c00000007c72fbc0] [c000000000a2b5fc] .oprofile_add_ext_sample+0xe8/0x118
[c00000007c72fc70] [c000000000a2eee0] .fsl_emb_handle_interrupt+0x20c/0x27c
[c00000007c72fd30] [c000000000a2e440] .op_handle_interrupt+0x44/0x58
[c00000007c72fdb0] [c000000000016d68] .performance_monitor_exception+0x74/0x90
[c00000007c72fe30] [c00000000001d8b4] exc_0x260_common+0xfc/0x100

performance_monitor_exception() runs in a context with interrupts
disabled but preemption enabled. When a user-space access faults,
do_page_fault() calls in_atomic() to decide whether the kernel may
handle the fault, and in_atomic() only checks the preempt count.
We therefore need to call pagefault_disable(), which raises the
preempt count, before reading the user stack.

Signed-off-by: Jiang Lu <lu.jiang@windriver.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2014-12-02 14:10:08 +11:00
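
The patch brackets the user-stack reads in op_powerpc_backtrace() below with
pagefault_disable()/pagefault_enable(). As a minimal sketch of that pattern
(read_user_word() is a hypothetical helper for illustration, not part of the
file):

#include <linux/uaccess.h>	/* pagefault_disable(), __copy_from_user_inatomic() */

static int read_user_word(unsigned long __user *addr, unsigned long *val)
{
	unsigned long left;

	pagefault_disable();	/* raise the preempt count so the fault is not serviced */
	left = __copy_from_user_inatomic(val, addr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;	/* non-zero means the copy faulted */
}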


/**
 * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
**/

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/compat.h>
#include <asm/oprofile_impl.h>
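
/*
 * A PowerPC stack frame begins with the back-chain pointer (the saved SP).
 * The saved LR sits at offset 2*sizeof(unsigned long) in a 64-bit frame
 * (after the CR save slot) and at offset sizeof(unsigned int) in a 32-bit
 * frame, which is what STACK_LR64()/STACK_LR32() below read.
 */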
#define STACK_SP(STACK)		*(STACK)

#define STACK_LR64(STACK)	*((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK)	*((unsigned int *)(STACK) + 1)

#ifdef CONFIG_PPC64
#define STACK_LR(STACK)		STACK_LR64(STACK)
#else
#define STACK_LR(STACK)		STACK_LR32(STACK)
#endif

static unsigned int user_getsp32(unsigned int sp, int is_first)
{
	unsigned int stack_frame[2];
	void __user *p = compat_ptr(sp);

	if (!access_ok(VERIFY_READ, p, sizeof(stack_frame)))
		return 0;

	/*
	 * The most likely reason for this is that we returned -EFAULT,
	 * which means that we've done all that we can do from
	 * interrupt context.
	 */
	if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR32(stack_frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we may transition to a different stack, eg a signal handler.
	 */
	return STACK_SP(stack_frame);
}

#ifdef CONFIG_PPC64
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
	unsigned long stack_frame[3];

	if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame)))
		return 0;

	if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
					sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR64(stack_frame));

	return STACK_SP(stack_frame);
}
#endif

static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
	unsigned long *stack_frame = (unsigned long *)sp;

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR(stack_frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we might be transitioning from an interrupt stack to a kernel
	 * stack. validate_sp() is designed to understand this, so just
	 * use it.
	 */
	return STACK_SP(stack_frame);
}

void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long sp = regs->gpr[1];
	int first_frame = 1;

	/* We ditch the top stackframe so need to loop through an extra time */
	depth += 1;

	if (!user_mode(regs)) {
		while (depth--) {
			sp = kernel_getsp(sp, first_frame);
			if (!sp)
				break;
			first_frame = 0;
		}
	} else {
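		/*
		 * We run from the PMU exception, which leaves interrupts off
		 * but preemption enabled.  pagefault_disable() raises the
		 * preempt count so that a faulting user-stack read makes
		 * __copy_from_user_inatomic() bail out instead of having
		 * do_page_fault() try to service the fault here.
		 */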
		pagefault_disable();
#ifdef CONFIG_PPC64
		if (!is_32bit_task()) {
			while (depth--) {
				sp = user_getsp64(sp, first_frame);
				if (!sp)
					break;
				first_frame = 0;
			}
			pagefault_enable();
			return;
		}
#endif

		while (depth--) {
			sp = user_getsp32(sp, first_frame);
			if (!sp)
				break;
			first_frame = 0;
		}
		pagefault_enable();
	}
}