f215d985e9
Sparc64 changes to track kprobe execution on a per-cpu basis.

We now track the kprobe state machine independently on each cpu using an
arch specific kprobe control block.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
53 lines
1.3 KiB
C
#ifndef _SPARC64_KPROBES_H
#define _SPARC64_KPROBES_H

#include <linux/config.h>
#include <linux/types.h>
#include <linux/percpu.h>

typedef u32 kprobe_opcode_t;

#define BREAKPOINT_INSTRUCTION   0x91d02070 /* ta 0x70 */
#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */
#define MAX_INSN_SIZE 2

#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry

/* Architecture specific copy of original instruction */
struct arch_specific_insn {
	/* copy of the original instruction */
	kprobe_opcode_t insn[MAX_INSN_SIZE];
};

struct prev_kprobe {
	struct kprobe *kp;
	unsigned int status;
	unsigned long orig_tnpc;
	unsigned long orig_tstate_pil;
};

/* per-cpu kprobe control block */
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_orig_tnpc;
	unsigned long kprobe_orig_tstate_pil;
	long *jprobe_saved_esp;
	struct pt_regs jprobe_saved_regs;
	struct pt_regs *jprobe_saved_regs_location;
	struct sparc_stackf jprobe_saved_stack;
	struct prev_kprobe prev_kprobe;
};

#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);
#else /* !CONFIG_KPROBES */
static inline int kprobe_exceptions_notify(struct notifier_block *self,
					   unsigned long val, void *data)
{
	return 0;
}
#endif

#endif /* _SPARC64_KPROBES_H */
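The control block declared above is what lets the arch-specific breakpoint handling keep its kprobe state machine per cpu, as the commit message describes. Below is a minimal sketch of how such a control block is typically consumed, assuming the generic helpers kprobe_running(), get_kprobe_ctlblk() and the KPROBE_HIT_ACTIVE status value from <linux/kprobes.h> of the same kernel tree; example_reenter_check() is a hypothetical name used only for illustration, not a function from this patch.

/*
 * Sketch only, not the actual arch/sparc64/kernel/kprobes.c.  It shows the
 * intended use of struct kprobe_ctlblk and struct prev_kprobe: on a nested
 * (reentrant) probe hit, the state of the kprobe already being handled on
 * this cpu is stashed in prev_kprobe so it can be restored later.
 */
#include <linux/kprobes.h>
#include <linux/preempt.h>

/* Stash the kprobe this cpu was already handling before the nested hit */
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
	kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

/* Hypothetical entry point invoked when a breakpoint trap is taken */
static int example_reenter_check(void)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();		/* state is per-cpu; don't migrate */
	kcb = get_kprobe_ctlblk();

	if (kprobe_running())		/* already inside a probe handler? */
		save_previous_kprobe(kcb);

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	preempt_enable_no_resched();
	return 0;
}

Because each cpu owns its own kprobe_ctlblk, probes firing concurrently on different cpus no longer race on a single global state machine; the only reentrancy the arch code has to handle is a probe hitting while the same cpu is already inside a handler, which is what prev_kprobe covers.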