forked from luck/tmp_suning_uos_patched
Merge branch 'core' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into perf/core
This commit is contained in:
commit
9f8b6a6cf0
|
@ -7,6 +7,10 @@
|
|||
extern int raw_show_trace;
|
||||
extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
|
||||
unsigned long pc, unsigned long *ra);
|
||||
extern unsigned long unwind_stack_by_address(unsigned long stack_page,
|
||||
unsigned long *sp,
|
||||
unsigned long pc,
|
||||
unsigned long *ra);
|
||||
#else
|
||||
#define raw_show_trace 1
|
||||
static inline unsigned long unwind_stack(struct task_struct *task,
|
||||
|
|
|
@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
|
|||
|
||||
|
||||
#ifdef CONFIG_KALLSYMS
|
||||
/* used by show_backtrace() */
|
||||
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
|
||||
unsigned long pc, unsigned long *ra)
|
||||
/* generic stack unwinding function */
|
||||
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
|
||||
unsigned long *sp,
|
||||
unsigned long pc,
|
||||
unsigned long *ra)
|
||||
{
|
||||
unsigned long stack_page;
|
||||
struct mips_frame_info info;
|
||||
unsigned long size, ofs;
|
||||
int leaf;
|
||||
extern void ret_from_irq(void);
|
||||
extern void ret_from_exception(void);
|
||||
|
||||
stack_page = (unsigned long)task_stack_page(task);
|
||||
if (!stack_page)
|
||||
return 0;
|
||||
|
||||
|
@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
|
|||
*ra = 0;
|
||||
return __kernel_text_address(pc) ? pc : 0;
|
||||
}
|
||||
EXPORT_SYMBOL(unwind_stack_by_address);
|
||||
|
||||
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	/* Thin wrapper: resolve @task's stack page, then delegate to the
	 * generic address-based unwinder. */
	return unwind_stack_by_address((unsigned long)task_stack_page(task),
				       sp, pc, ra);
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
|
|||
oprofilefs.o oprofile_stats.o \
|
||||
timer_int.o )
|
||||
|
||||
oprofile-y := $(DRIVER_OBJS) common.o
|
||||
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
|
||||
|
||||
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
|
||||
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
|
||||
|
|
175
arch/mips/oprofile/backtrace.c
Normal file
175
arch/mips/oprofile/backtrace.c
Normal file
|
@ -0,0 +1,175 @@
|
|||
#include <linux/oprofile.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <linux/stacktrace.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/inst.h>
|
||||
|
||||
/*
 * Snapshot of the three registers needed to walk one stack frame.
 */
struct stackframe {
	unsigned long sp;	/* stack pointer ($29) */
	unsigned long pc;	/* program counter */
	unsigned long ra;	/* return address ($31) */
};
|
||||
|
||||
static inline int get_mem(unsigned long addr, unsigned long *result)
|
||||
{
|
||||
unsigned long *address = (unsigned long *) addr;
|
||||
if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
|
||||
return -1;
|
||||
if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
|
||||
return -3;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* These two instruction helpers were taken from process.c
|
||||
*/
|
||||
static inline int is_ra_save_ins(union mips_instruction *ip)
|
||||
{
|
||||
/* sw / sd $ra, offset($sp) */
|
||||
return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
|
||||
&& ip->i_format.rs == 29 && ip->i_format.rt == 31;
|
||||
}
|
||||
|
||||
static inline int is_sp_move_ins(union mips_instruction *ip)
|
||||
{
|
||||
/* addiu/daddiu sp,sp,-imm */
|
||||
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
|
||||
return 0;
|
||||
if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Looks for specific instructions that mark the end of a function.
|
||||
* This usually means we ran into the code area of the previous function.
|
||||
*/
|
||||
static inline int is_end_of_function_marker(union mips_instruction *ip)
|
||||
{
|
||||
/* jr ra */
|
||||
if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
|
||||
return 1;
|
||||
/* lui gp */
|
||||
if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO for userspace stack unwinding:
|
||||
* - handle cases where the stack is adjusted inside a function
|
||||
* (generally doesn't happen)
|
||||
* - find optimal value for max_instr_check
|
||||
* - try to find a way to handle leaf functions
|
||||
*/
|
||||
|
||||
static inline int unwind_user_frame(struct stackframe *old_frame,
|
||||
const unsigned int max_instr_check)
|
||||
{
|
||||
struct stackframe new_frame = *old_frame;
|
||||
off_t ra_offset = 0;
|
||||
size_t stack_size = 0;
|
||||
unsigned long addr;
|
||||
|
||||
if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
|
||||
return -9;
|
||||
|
||||
for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
|
||||
&& (!ra_offset || !stack_size); --addr) {
|
||||
union mips_instruction ip;
|
||||
|
||||
if (get_mem(addr, (unsigned long *) &ip))
|
||||
return -11;
|
||||
|
||||
if (is_sp_move_ins(&ip)) {
|
||||
int stack_adjustment = ip.i_format.simmediate;
|
||||
if (stack_adjustment > 0)
|
||||
/* This marks the end of the previous function,
|
||||
which means we overran. */
|
||||
break;
|
||||
stack_size = (unsigned) stack_adjustment;
|
||||
} else if (is_ra_save_ins(&ip)) {
|
||||
int ra_slot = ip.i_format.simmediate;
|
||||
if (ra_slot < 0)
|
||||
/* This shouldn't happen. */
|
||||
break;
|
||||
ra_offset = ra_slot;
|
||||
} else if (is_end_of_function_marker(&ip))
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ra_offset || !stack_size)
|
||||
return -1;
|
||||
|
||||
if (ra_offset) {
|
||||
new_frame.ra = old_frame->sp + ra_offset;
|
||||
if (get_mem(new_frame.ra, &(new_frame.ra)))
|
||||
return -13;
|
||||
}
|
||||
|
||||
if (stack_size) {
|
||||
new_frame.sp = old_frame->sp + stack_size;
|
||||
if (get_mem(new_frame.sp, &(new_frame.sp)))
|
||||
return -14;
|
||||
}
|
||||
|
||||
if (new_frame.sp > old_frame->sp)
|
||||
return -2;
|
||||
|
||||
new_frame.pc = old_frame->ra;
|
||||
*old_frame = new_frame;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void do_user_backtrace(unsigned long low_addr,
|
||||
struct stackframe *frame,
|
||||
unsigned int depth)
|
||||
{
|
||||
const unsigned int max_instr_check = 512;
|
||||
const unsigned long high_addr = low_addr + THREAD_SIZE;
|
||||
|
||||
while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
|
||||
oprofile_add_trace(frame->ra);
|
||||
if (frame->sp < low_addr || frame->sp > high_addr)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_KALLSYMS
/* Without kallsyms there is no kernel unwinder; record nothing. */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth) { }
#else
/*
 * Walk up to @depth kernel frames via unwind_stack_by_address(),
 * feeding each recovered return address to oprofile.  Stops once the
 * unwinder yields a zero pc.
 */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	while (depth-- && frame->pc) {
		frame->pc = unwind_stack_by_address(low_addr, &frame->sp,
						    frame->pc, &frame->ra);
		oprofile_add_trace(frame->ra);
	}
}
#endif
|
||||
|
||||
/*
 * Oprofile backtrace entry point: record up to @depth caller addresses
 * for the interrupted context described by @regs, choosing the user or
 * kernel walker based on the saved processor mode.
 */
void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	struct stackframe frame;
	unsigned long low_addr;

	frame.sp = regs->regs[29];	/* $sp */
	frame.pc = regs->cp0_epc;
	frame.ra = regs->regs[31];	/* $ra */

	/* NOTE(review): ALIGN() rounds up — confirm this yields the
	 * intended stack bound passed to the unwinders. */
	low_addr = ALIGN(frame.sp, THREAD_SIZE);

	if (user_mode(regs))
		do_user_backtrace(low_addr, &frame, depth);
	else
		do_kernel_backtrace(low_addr, &frame, depth);
}
|
|
@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
|
|||
ops->start = op_mips_start;
|
||||
ops->stop = op_mips_stop;
|
||||
ops->cpu_type = lmodel->cpu_type;
|
||||
ops->backtrace = op_mips_backtrace;
|
||||
|
||||
printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
|
||||
lmodel->cpu_type);
|
||||
|
|
|
@ -36,4 +36,6 @@ struct op_mips_model {
|
|||
unsigned char num_counters;
|
||||
};
|
||||
|
||||
void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -11,10 +11,12 @@
|
|||
#include <linux/oprofile.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/highmem.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <linux/compat.h>
|
||||
|
||||
static int backtrace_stack(void *data, char *name)
|
||||
{
|
||||
|
@ -36,17 +38,53 @@ static struct stacktrace_ops backtrace_ops = {
|
|||
.walk_stack = print_context_stack,
|
||||
};
|
||||
|
||||
/* from arch/x86/kernel/cpu/perf_event.c: */
|
||||
|
||||
/*
|
||||
* best effort, GUP based copy_from_user() that assumes IRQ or NMI context
|
||||
*/
|
||||
static unsigned long
|
||||
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
||||
{
|
||||
unsigned long offset, addr = (unsigned long)from;
|
||||
unsigned long size, len = 0;
|
||||
struct page *page;
|
||||
void *map;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
ret = __get_user_pages_fast(addr, 1, 0, &page);
|
||||
if (!ret)
|
||||
break;
|
||||
|
||||
offset = addr & (PAGE_SIZE - 1);
|
||||
size = min(PAGE_SIZE - offset, n - len);
|
||||
|
||||
map = kmap_atomic(page);
|
||||
memcpy(to, map+offset, size);
|
||||
kunmap_atomic(map);
|
||||
put_page(page);
|
||||
|
||||
len += size;
|
||||
to += size;
|
||||
addr += size;
|
||||
|
||||
} while (len < n);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static struct stack_frame_ia32 *
|
||||
dump_user_backtrace_32(struct stack_frame_ia32 *head)
|
||||
{
|
||||
/* Also check accessibility of one struct frame_head beyond: */
|
||||
struct stack_frame_ia32 bufhead[2];
|
||||
struct stack_frame_ia32 *fp;
|
||||
unsigned long bytes;
|
||||
|
||||
/* Also check accessibility of one struct frame_head beyond */
|
||||
if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
||||
return NULL;
|
||||
if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
||||
bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
||||
if (bytes != sizeof(bufhead))
|
||||
return NULL;
|
||||
|
||||
fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
|
||||
|
@ -87,12 +125,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
|
|||
|
||||
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
|
||||
{
|
||||
/* Also check accessibility of one struct frame_head beyond: */
|
||||
struct stack_frame bufhead[2];
|
||||
unsigned long bytes;
|
||||
|
||||
/* Also check accessibility of one struct stack_frame beyond */
|
||||
if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
||||
return NULL;
|
||||
if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
||||
bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
||||
if (bytes != sizeof(bufhead))
|
||||
return NULL;
|
||||
|
||||
oprofile_add_trace(bufhead[0].return_address);
|
||||
|
|
|
@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
|
|||
static int nmi_start(void)
|
||||
{
|
||||
get_online_cpus();
|
||||
on_each_cpu(nmi_cpu_start, NULL, 1);
|
||||
ctr_running = 1;
|
||||
/* make ctr_running visible to the nmi handler: */
|
||||
smp_mb();
|
||||
on_each_cpu(nmi_cpu_start, NULL, 1);
|
||||
put_online_cpus();
|
||||
return 0;
|
||||
}
|
||||
|
@ -504,15 +506,18 @@ static int nmi_setup(void)
|
|||
|
||||
nmi_enabled = 0;
|
||||
ctr_running = 0;
|
||||
barrier();
|
||||
/* make variables visible to the nmi handler: */
|
||||
smp_mb();
|
||||
err = register_die_notifier(&profile_exceptions_nb);
|
||||
if (err)
|
||||
goto fail;
|
||||
|
||||
get_online_cpus();
|
||||
register_cpu_notifier(&oprofile_cpu_nb);
|
||||
on_each_cpu(nmi_cpu_setup, NULL, 1);
|
||||
nmi_enabled = 1;
|
||||
/* make nmi_enabled visible to the nmi handler: */
|
||||
smp_mb();
|
||||
on_each_cpu(nmi_cpu_setup, NULL, 1);
|
||||
put_online_cpus();
|
||||
|
||||
return 0;
|
||||
|
@ -531,7 +536,8 @@ static void nmi_shutdown(void)
|
|||
nmi_enabled = 0;
|
||||
ctr_running = 0;
|
||||
put_online_cpus();
|
||||
barrier();
|
||||
/* make variables visible to the nmi handler: */
|
||||
smp_mb();
|
||||
unregister_die_notifier(&profile_exceptions_nb);
|
||||
msrs = &get_cpu_var(cpu_msrs);
|
||||
model->shutdown(msrs);
|
||||
|
|
Loading…
Reference in New Issue
Block a user