forked from luck/tmp_suning_uos_patched
bf05fc25f2
When a kthread calls call_usermodehelper() the steps are:
1. allocate current->mm
2. load_elf_binary()
3. populate current->thread.regs
While doing this, interrupts are not disabled. If there is a perf
interrupt in the middle of this process (i.e. step 1 has completed
but not yet reached step 3) and if perf tries to read userspace
regs, the kernel oopses with the following log:
Unable to handle kernel paging request for data at address 0x00000000
Faulting instruction address: 0xc0000000000da0fc
...
Call Trace:
perf_output_sample_regs+0x6c/0xd0
perf_output_sample+0x4e4/0x830
perf_event_output_forward+0x64/0x90
__perf_event_overflow+0x8c/0x1e0
record_and_restart+0x220/0x5c0
perf_event_interrupt+0x2d8/0x4d0
performance_monitor_exception+0x54/0x70
performance_monitor_common+0x158/0x160
--- interrupt: f01 at avtab_search_node+0x150/0x1a0
LR = avtab_search_node+0x100/0x1a0
...
load_elf_binary+0x6e8/0x15a0
search_binary_handler+0xe8/0x290
do_execveat_common.isra.14+0x5f4/0x840
call_usermodehelper_exec_async+0x170/0x210
ret_from_kernel_thread+0x5c/0x7c
Fix it by setting abi to PERF_SAMPLE_REGS_ABI_NONE when userspace
pt_regs are not set.
Fixes: ed4a4ef85c
("powerpc/perf: Add support for sampling interrupt register state")
Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
107 lines · 3.5 KiB · C
/*
 * Copyright 2016 Anju T, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>
#include <asm/perf_regs.h>

/* Map a PERF_REG_POWERPC_* index to the byte offset of field @r in struct pt_regs. */
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

/* Bits above the highest register we expose; any of these set in a user mask is invalid. */
#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))

/*
 * Lookup table from perf register index to its offset within struct pt_regs,
 * used by perf_reg_value() to fetch a sampled register.
 */
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
	PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R1, gpr[1]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R2, gpr[2]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R3, gpr[3]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R4, gpr[4]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R5, gpr[5]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R6, gpr[6]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R7, gpr[7]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R8, gpr[8]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R9, gpr[9]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R10, gpr[10]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R11, gpr[11]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R12, gpr[12]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R13, gpr[13]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R14, gpr[14]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R15, gpr[15]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R16, gpr[16]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R17, gpr[17]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R18, gpr[18]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R19, gpr[19]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R20, gpr[20]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R21, gpr[21]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R22, gpr[22]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R23, gpr[23]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R24, gpr[24]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R25, gpr[25]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R26, gpr[26]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R27, gpr[27]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R28, gpr[28]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R29, gpr[29]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R30, gpr[30]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R31, gpr[31]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_NIP, nip),
	PT_REGS_OFFSET(PERF_REG_POWERPC_MSR, msr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_ORIG_R3, orig_gpr3),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CTR, ctr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_LINK, link),
	PT_REGS_OFFSET(PERF_REG_POWERPC_XER, xer),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CCR, ccr),
#ifdef CONFIG_PPC64
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, softe),
#else
	/* 32-bit has no softe field; the mq slot occupies this position in pt_regs. */
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
#endif
	PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
|
|
{
|
|
if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
|
|
return 0;
|
|
|
|
return regs_get_register(regs, pt_regs_offset[idx]);
|
|
}
|
|
|
|
/*
 * Validate a user-supplied sample_regs mask: it must request at least one
 * register and must not set any bit outside the exported register set.
 * Returns 0 if valid, -EINVAL otherwise.
 */
int perf_reg_validate(u64 mask)
{
	/* An empty mask requests nothing — reject it. */
	if (!mask)
		return -EINVAL;

	/* Bits beyond PERF_REG_POWERPC_MAX have no meaning here. */
	if (mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}
u64 perf_reg_abi(struct task_struct *task)
|
|
{
|
|
#ifdef CONFIG_PPC64
|
|
if (!test_tsk_thread_flag(task, TIF_32BIT))
|
|
return PERF_SAMPLE_REGS_ABI_64;
|
|
else
|
|
#endif
|
|
return PERF_SAMPLE_REGS_ABI_32;
|
|
}
|
|
|
|
void perf_get_regs_user(struct perf_regs *regs_user,
|
|
struct pt_regs *regs,
|
|
struct pt_regs *regs_user_copy)
|
|
{
|
|
regs_user->regs = task_pt_regs(current);
|
|
regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
|
|
PERF_SAMPLE_REGS_ABI_NONE;
|
|
}
|