forked from luck/tmp_suning_uos_patched

Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.

commit 43c1266ce4
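Context for readers of this diff: the one user-visible ABI point in the rename is the syscall, which keeps its number on every architecture (see the unistd hunks below) but is now exposed as perf_event_open(). The sketch below is illustrative only, not part of this commit; it assumes post-rename kernel headers, and the event choice and attr settings are arbitrary example values. glibc provides no wrapper, so the call goes through syscall(2).

/* Minimal sketch: count instructions retired in this process via the
 * renamed perf_event_open() syscall (assumes <linux/perf_event.h> from
 * a kernel with this rename applied). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0 (this process), cpu = -1 (any), no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... code under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}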
@@ -4000,7 +4000,7 @@ S: Maintained
 F: include/linux/delayacct.h
 F: kernel/delayacct.c
 
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M: Peter Zijlstra <a.p.zijlstra@chello.nl>
 M: Paul Mackerras <paulus@samba.org>
 M: Ingo Molnar <mingo@elte.hu>

@@ -390,7 +390,7 @@
 #define __NR_preadv (__NR_SYSCALL_BASE+361)
 #define __NR_pwritev (__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
 
 /*
  * The following SWIs are ARM private.

@@ -373,7 +373,7 @@
 CALL(sys_preadv)
 CALL(sys_pwritev)
 CALL(sys_rt_tgsigqueueinfo)
-CALL(sys_perf_counter_open)
+CALL(sys_perf_event_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted

@@ -381,7 +381,7 @@
 #define __NR_preadv 366
 #define __NR_pwritev 367
 #define __NR_rt_tgsigqueueinfo 368
-#define __NR_perf_counter_open 369
+#define __NR_perf_event_open 369
 
 #define __NR_syscall 370
 #define NR_syscalls __NR_syscall

@@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table)
 .long _sys_preadv
 .long _sys_pwritev
 .long _sys_rt_tgsigqueueinfo
-.long _sys_perf_counter_open
+.long _sys_perf_event_open
 
 .rept NR_syscalls-(.-_sys_call_table)/4
 .long _sys_ni_syscall

@@ -7,7 +7,7 @@ config FRV
 default y
 select HAVE_IDE
 select HAVE_ARCH_TRACEHOOK
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 
 config ZONE_DMA
 bool

@@ -1,4 +1,4 @@
-/* FRV performance counter support
+/* FRV performance event support
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,9 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
 
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
 
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */

@@ -342,7 +342,7 @@
 #define __NR_preadv 333
 #define __NR_pwritev 334
 #define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
 
 #ifdef __KERNEL__
 

@@ -1525,6 +1525,6 @@ sys_call_table:
 .long sys_preadv
 .long sys_pwritev
 .long sys_rt_tgsigqueueinfo /* 335 */
-.long sys_perf_counter_open
+.long sys_perf_event_open
 
 syscall_table_size = (. - sys_call_table)

@@ -5,4 +5,4 @@
 lib-y := \
 __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o

@@ -1,4 +1,4 @@
-/* Performance counter handling
+/* Performance event handling
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,11 +9,11 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 /*
- * mark the performance counter as pending
+ * mark the performance event as pending
  */
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 }

@@ -335,7 +335,7 @@
 #define __NR_preadv 329
 #define __NR_pwritev 330
 #define __NR_rt_tgsigqueueinfo 331
-#define __NR_perf_counter_open 332
+#define __NR_perf_event_open 332
 
 #ifdef __KERNEL__
 

@@ -756,5 +756,5 @@ sys_call_table:
 .long sys_preadv
 .long sys_pwritev /* 330 */
 .long sys_rt_tgsigqueueinfo
-.long sys_perf_counter_open
+.long sys_perf_event_open
 

@@ -350,7 +350,7 @@ ENTRY(sys_call_table)
 .long sys_preadv
 .long sys_pwritev /* 330 */
 .long sys_rt_tgsigqueueinfo
-.long sys_perf_counter_open
+.long sys_perf_event_open
 
 .rept NR_syscalls-(.-sys_call_table)/4
 .long sys_ni_syscall

@@ -381,7 +381,7 @@
 #define __NR_preadv 363 /* new */
 #define __NR_pwritev 364 /* new */
 #define __NR_rt_tgsigqueueinfo 365 /* new */
-#define __NR_perf_counter_open 366 /* new */
+#define __NR_perf_event_open 366 /* new */
 
 #define __NR_syscalls 367
 

@@ -370,4 +370,4 @@ ENTRY(sys_call_table)
 .long sys_ni_syscall
 .long sys_ni_syscall
 .long sys_rt_tgsigqueueinfo /* 365 */
-.long sys_perf_counter_open
+.long sys_perf_event_open

@@ -353,7 +353,7 @@
 #define __NR_preadv (__NR_Linux + 330)
 #define __NR_pwritev (__NR_Linux + 331)
 #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
-#define __NR_perf_counter_open (__NR_Linux + 333)
+#define __NR_perf_event_open (__NR_Linux + 333)
 #define __NR_accept4 (__NR_Linux + 334)
 
 /*
@@ -664,7 +664,7 @@
 #define __NR_preadv (__NR_Linux + 289)
 #define __NR_pwritev (__NR_Linux + 290)
 #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
-#define __NR_perf_counter_open (__NR_Linux + 292)
+#define __NR_perf_event_open (__NR_Linux + 292)
 #define __NR_accept4 (__NR_Linux + 293)
 
 /*
@@ -979,7 +979,7 @@
 #define __NR_preadv (__NR_Linux + 293)
 #define __NR_pwritev (__NR_Linux + 294)
 #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295)
-#define __NR_perf_counter_open (__NR_Linux + 296)
+#define __NR_perf_event_open (__NR_Linux + 296)
 #define __NR_accept4 (__NR_Linux + 297)
 
 /*

@@ -581,7 +581,7 @@ einval: li v0, -ENOSYS
 sys sys_preadv 6 /* 4330 */
 sys sys_pwritev 6
 sys sys_rt_tgsigqueueinfo 4
-sys sys_perf_counter_open 5
+sys sys_perf_event_open 5
 sys sys_accept4 4
 .endm
 

@@ -418,6 +418,6 @@ sys_call_table:
 PTR sys_preadv
 PTR sys_pwritev /* 5390 */
 PTR sys_rt_tgsigqueueinfo
-PTR sys_perf_counter_open
+PTR sys_perf_event_open
 PTR sys_accept4
 .size sys_call_table,.-sys_call_table

@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
 PTR sys_preadv
 PTR sys_pwritev
 PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
-PTR sys_perf_counter_open
+PTR sys_perf_event_open
 PTR sys_accept4
 .size sysn32_call_table,.-sysn32_call_table

@@ -536,6 +536,6 @@ sys_call_table:
 PTR compat_sys_preadv /* 4330 */
 PTR compat_sys_pwritev
 PTR compat_sys_rt_tgsigqueueinfo
-PTR sys_perf_counter_open
+PTR sys_perf_event_open
 PTR sys_accept4
 .size sys_call_table,.-sys_call_table

@@ -347,7 +347,7 @@
 #define __NR_preadv 334
 #define __NR_pwritev 335
 #define __NR_rt_tgsigqueueinfo 336
-#define __NR_perf_counter_open 337
+#define __NR_perf_event_open 337
 
 #ifdef __KERNEL__
 

@@ -723,7 +723,7 @@ ENTRY(sys_call_table)
 .long sys_preadv
 .long sys_pwritev /* 335 */
 .long sys_rt_tgsigqueueinfo
-.long sys_perf_counter_open
+.long sys_perf_event_open
 
 
 nr_syscalls=(.-sys_call_table)/4

@@ -16,7 +16,7 @@ config PARISC
 select RTC_DRV_GENERIC
 select INIT_ALL_POSSIBLE
 select BUG
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 select GENERIC_ATOMIC64 if !64BIT
 help
 The PA-RISC microprocessor is designed by Hewlett-Packard and used

@@ -1,7 +0,0 @@
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */

arch/parisc/include/asm/perf_event.h (new file, 7 lines)

@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */

@@ -810,9 +810,9 @@
 #define __NR_preadv (__NR_Linux + 315)
 #define __NR_pwritev (__NR_Linux + 316)
 #define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
-#define __NR_perf_counter_open (__NR_Linux + 318)
+#define __NR_perf_event_open (__NR_Linux + 318)
 
-#define __NR_Linux_syscalls (__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls (__NR_perf_event_open + 1)
 
 
 #define __IGNORE_select /* newselect */

@@ -416,7 +416,7 @@
 ENTRY_COMP(preadv) /* 315 */
 ENTRY_COMP(pwritev)
 ENTRY_COMP(rt_tgsigqueueinfo)
-ENTRY_SAME(perf_counter_open)
+ENTRY_SAME(perf_event_open)
 
 /* Nothing yet */
 

@@ -129,7 +129,7 @@ config PPC
 select HAVE_OPROFILE
 select HAVE_SYSCALL_WRAPPERS if PPC64
 select GENERIC_ATOMIC64 if PPC32
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 
 config EARLY_PRINTK
 bool

@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 unsigned long x;
 
 asm volatile("lbz %0,%1(13)"
 : "=r" (x)
-: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+: "i" (offsetof(struct paca_struct, perf_event_pending)));
 return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 asm volatile("stb %0,%1(13)" : :
 "r" (1),
-"i" (offsetof(struct paca_struct, perf_counter_pending)));
+"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 asm volatile("stb %0,%1(13)" : :
 "r" (0),
-"i" (offsetof(struct paca_struct, perf_counter_pending)));
+"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else /* CONFIG_PERF_COUNTERS */
+#else /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */

@@ -122,7 +122,7 @@ struct paca_struct {
 u8 soft_enabled; /* irq soft-enable flag */
 u8 hard_enabled; /* set if irqs are enabled in MSR */
 u8 io_sync; /* writel() needs spin_unlock sync */
-u8 perf_counter_pending; /* PM interrupt while soft-disabled */
+u8 perf_event_pending; /* PM interrupt while soft-disabled */
 
 /* Stuff for accurate time accounting */
 u64 user_time; /* accumulated usermode TB ticks */

@@ -1,5 +1,5 @@
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
@@ -12,9 +12,9 @@
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS 8
+#define MAX_HWEVENTS 8
 #define MAX_EVENT_ALTERNATIVES 8
-#define MAX_LIMITED_HWCOUNTERS 2
+#define MAX_LIMITED_HWEVENTS 2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,18 +22,18 @@
  */
 struct power_pmu {
 const char *name;
-int n_counter;
+int n_event;
 int max_alternatives;
 unsigned long add_fields;
 unsigned long test_adder;
 int (*compute_mmcr)(u64 events[], int n_ev,
 unsigned int hwc[], unsigned long mmcr[]);
-int (*get_constraint)(u64 event, unsigned long *mskp,
+int (*get_constraint)(u64 event_id, unsigned long *mskp,
 unsigned long *valp);
-int (*get_alternatives)(u64 event, unsigned int flags,
+int (*get_alternatives)(u64 event_id, unsigned int flags,
 u64 alt[]);
 void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-int (*limited_pmc_event)(u64 event);
+int (*limited_pmc_event)(u64 event_id);
 u32 flags;
 int n_generic;
 int *generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET 1
+#define PERF_EVENT_INDEX_OFFSET 1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
  */
 #ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
 /*
  * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
  * other events.
  *
  * The value and mask are divided up into (non-overlapping) bitfields
  * of three different types:
  *
  * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event. For a
+ * in MMCR* needs to be set to a specific value for this event_id. For a
  * select field, the mask contains 1s in every bit of the field, and
  * the value contains a unique value for each possible setting of the
  * MMCR* bits. The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 * possible.) For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits. The mask has
 * only the most-significant bit set, and the value has only the bit
- * for the event's class set. The test_adder has the least significant
+ * for the event_id's class set. The test_adder has the least significant
 * bit set in the field.
 *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */

@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)

@@ -341,7 +341,7 @@
 #define __NR_dup3 316
 #define __NR_pipe2 317
 #define __NR_inotify_init1 318
-#define __NR_perf_counter_open 319
+#define __NR_perf_event_open 319
 #define __NR_preadv 320
 #define __NR_pwritev 321
 #define __NR_rt_tgsigqueueinfo 322

@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
 power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o

@@ -133,7 +133,7 @@ int main(void)
 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,

@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_COUNTERS
-/* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+/* check paca->perf_event_pending if we're enabling ints */
 lbz r3,PACAPERFPEND(r13)
 and. r3,r3,r5
 beq 27f
-bl .perf_counter_do_pending
+bl .perf_event_do_pending
 27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
 
 /* extract EE bit and use it to restore paca->hard_enabled */
 ld r3,_MSR(r1)

@@ -53,7 +53,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-if (test_perf_counter_pending()) {
-clear_perf_counter_pending();
-perf_counter_do_pending();
+if (test_perf_event_pending()) {
+clear_perf_event_pending();
+perf_event_do_pending();
 }
 
 /*

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 

@@ -10,7 +10,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>

[File diff suppressed because it is too large]

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/string.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>

@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 

@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>

@@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_counter_pending);
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_event_pending);
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
-get_cpu_var(perf_counter_pending) = 1;
+get_cpu_var(perf_event_pending) = 1;
 set_dec(1);
-put_cpu_var(perf_counter_pending);
+put_cpu_var(perf_event_pending);
 }
 
-#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
-#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
 
-#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
-#define test_perf_counter_pending() 0
-#define clear_perf_counter_pending()
+#define test_perf_event_pending() 0
+#define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor

@@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs)
 set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-if (test_perf_counter_pending()) {
-clear_perf_counter_pending();
-perf_counter_do_pending();
+if (test_perf_event_pending()) {
+clear_perf_event_pending();
+perf_event_do_pending();
 }
 if (atomic_read(&ppc_n_lost_interrupts) != 0)
 do_IRQ(regs);

@@ -29,7 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>

@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 die("Weird page fault", regs, SIGSEGV);
 }
 
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 /* When running in the kernel we expect faults to occur only to
 * addresses in user space. All other faults represent errors in the
@@ -312,7 +312,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 }
 if (ret & VM_FAULT_MAJOR) {
 current->maj_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 regs, address);
 #ifdef CONFIG_PPC_SMLPAR
 if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif
 } else {
 current->min_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 regs, address);
 }
 up_read(&mm->mmap_sem);

@@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT
 
 config PPC_PERF_CTRS
 def_bool y
-depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
 help
-This enables the powerpc-specific perf_counter back-end.
+This enables the powerpc-specific perf_event back-end.
 
 config SMP
 depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE

@@ -94,7 +94,7 @@ config S390
 select HAVE_KVM if 64BIT
 select HAVE_ARCH_TRACEHOOK
 select INIT_ALL_POSSIBLE
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 
 config SCHED_OMIT_FRAME_POINTER
 bool

@@ -1,10 +0,0 @@
-/*
- * Performance counter support - s390 specific definitions.
- *
- * Copyright 2009 Martin Schwidefsky, IBM Corporation.
- */
-
-static inline void set_perf_counter_pending(void) {}
-static inline void clear_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0

arch/s390/include/asm/perf_event.h (new file, 10 lines)

@@ -0,0 +1,10 @@
+/*
+ * Performance event support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_event_pending(void) {}
+static inline void clear_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0

@@ -268,7 +268,7 @@
 #define __NR_preadv 328
 #define __NR_pwritev 329
 #define __NR_rt_tgsigqueueinfo 330
-#define __NR_perf_counter_open 331
+#define __NR_perf_event_open 331
 #define NR_syscalls 332
 
 /*

@@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper:
 llgtr %r5,%r5 # struct compat_siginfo *
 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
 
-.globl sys_perf_counter_open_wrapper
-sys_perf_counter_open_wrapper:
-llgtr %r2,%r2 # const struct perf_counter_attr *
+.globl sys_perf_event_open_wrapper
+sys_perf_event_open_wrapper:
+llgtr %r2,%r2 # const struct perf_event_attr *
 lgfr %r3,%r3 # pid_t
 lgfr %r4,%r4 # int
 lgfr %r5,%r5 # int
 llgfr %r6,%r6 # unsigned long
-jg sys_perf_counter_open # branch to system call
+jg sys_perf_event_open # branch to system call

@@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
-SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)

@@ -10,7 +10,7 @@
  * Copyright (C) 1995 Linus Torvalds
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>

@@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 * interrupts again and then search the VMAs
 */
 local_irq_enable();
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 down_read(&mm->mmap_sem);
 
 si_code = SEGV_MAPERR;
@@ -366,11 +366,11 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 }
 if (fault & VM_FAULT_MAJOR) {
 tsk->maj_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 regs, address);
 } else {
 tsk->min_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 regs, address);
 }
 up_read(&mm->mmap_sem);

@@ -16,7 +16,7 @@ config SUPERH
 select HAVE_IOREMAP_PROT if MMU
 select HAVE_ARCH_TRACEHOOK
 select HAVE_DMA_API_DEBUG
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 select HAVE_KERNEL_GZIP
 select HAVE_KERNEL_BZIP2
 select HAVE_KERNEL_LZMA

@@ -1,9 +0,0 @@
-#ifndef __ASM_SH_PERF_COUNTER_H
-#define __ASM_SH_PERF_COUNTER_H
-
-/* SH only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#endif /* __ASM_SH_PERF_COUNTER_H */

arch/sh/include/asm/perf_event.h (new file, 9 lines)

@@ -0,0 +1,9 @@
+#ifndef __ASM_SH_PERF_EVENT_H
+#define __ASM_SH_PERF_EVENT_H
+
+/* SH only supports software events through this interface. */
+static inline void set_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* __ASM_SH_PERF_EVENT_H */

@@ -344,7 +344,7 @@
 #define __NR_preadv 333
 #define __NR_pwritev 334
 #define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
 
 #define NR_syscalls 337
 

@@ -384,7 +384,7 @@
 #define __NR_preadv 361
 #define __NR_pwritev 362
 #define __NR_rt_tgsigqueueinfo 363
-#define __NR_perf_counter_open 364
+#define __NR_perf_event_open 364
 
 #ifdef __KERNEL__
 

@@ -352,4 +352,4 @@ ENTRY(sys_call_table)
 .long sys_preadv
 .long sys_pwritev
 .long sys_rt_tgsigqueueinfo /* 335 */
-.long sys_perf_counter_open
+.long sys_perf_event_open

@@ -390,4 +390,4 @@ sys_call_table:
 .long sys_preadv
 .long sys_pwritev
 .long sys_rt_tgsigqueueinfo
-.long sys_perf_counter_open
+.long sys_perf_event_open

@@ -15,7 +15,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>

@@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 if ((regs->sr & SR_IMASK) != SR_IMASK)
 local_irq_enable();
 
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 /*
 * If we're in an interrupt, have no user context or are running
@@ -208,11 +208,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 }
 if (fault & VM_FAULT_MAJOR) {
 tsk->maj_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 regs, address);
 } else {
 tsk->min_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 regs, address);
 }
 

@@ -20,7 +20,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <asm/system.h>
 #include <asm/io.h>

@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 /* Not an IO address, so reenable interrupts */
 local_irq_enable();
 
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 /*
 * If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 if (fault & VM_FAULT_MAJOR) {
 tsk->maj_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
 regs, address);
 } else {
 tsk->min_flt++;
-perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 regs, address);
 }
 

@@ -25,7 +25,7 @@ config SPARC
 select ARCH_WANT_OPTIONAL_GPIOLIB
 select RTC_CLASS
 select RTC_DRV_M48T59
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 select HAVE_DMA_ATTRS
 select HAVE_DMA_API_DEBUG
 

@@ -47,7 +47,7 @@ config SPARC64
 select RTC_DRV_BQ4802
 select RTC_DRV_SUN4V
 select RTC_DRV_STARFIRE
-select HAVE_PERF_COUNTERS
+select HAVE_PERF_EVENTS
 
 config ARCH_DEFCONFIG
 string

@@ -1,14 +0,0 @@
-#ifndef __ASM_SPARC_PERF_COUNTER_H
-#define __ASM_SPARC_PERF_COUNTER_H
-
-extern void set_perf_counter_pending(void);
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-#else
-static inline void init_hw_perf_counters(void) { }
-#endif
-
-#endif

arch/sparc/include/asm/perf_event.h (new file, 14 lines)

@@ -0,0 +1,14 @@
+#ifndef __ASM_SPARC_PERF_EVENT_H
+#define __ASM_SPARC_PERF_EVENT_H
+
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+#else
+static inline void init_hw_perf_events(void) { }
+#endif
+
+#endif

@@ -395,7 +395,7 @@
 #define __NR_preadv 324
 #define __NR_pwritev 325
 #define __NR_rt_tgsigqueueinfo 326
-#define __NR_perf_counter_open 327
+#define __NR_perf_event_open 327
 
 #define NR_SYSCALLS 328
 

@@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o
 audit--$(CONFIG_AUDIT) := compat_audit.o
 obj-$(CONFIG_COMPAT) += $(audit--y)
 
-pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64) += $(pc--y)

@@ -19,7 +19,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
 #include <asm/ptrace.h>
 #include <asm/local.h>
 #include <asm/pcr.h>

@@ -265,7 +265,7 @@ int __init nmi_init(void)
 }
 }
 if (!err)
-init_hw_perf_counters();
+init_hw_perf_events();
 
 return err;
 }

@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>

@@ -15,7 +15,7 @@
 
 /* This code is shared between various users of the performance
 * counters. Users will be oprofile, pseudo-NMI watchdog, and the
- * perf_counter support layer.
+ * perf_event support layer.
 */
 
 #define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)

@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
 old_regs = set_irq_regs(regs);
 irq_enter();
-#ifdef CONFIG_PERF_COUNTERS
-perf_counter_do_pending();
+#ifdef CONFIG_PERF_EVENTS
+perf_event_do_pending();
 #endif
 irq_exit();
 set_irq_regs(old_regs);
 }
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }

@@ -1,8 +1,8 @@
-/* Performance counter support for sparc64.
+/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
- * This code is based almost entirely upon the x86 perf counter
+ * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/kprobes.h>
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
@@ -46,19 +46,19 @@
 * normal code.
 */
 
-#define MAX_HWCOUNTERS 2
+#define MAX_HWEVENTS 2
 #define MAX_PERIOD ((1UL << 32) - 1)
 
 #define PIC_UPPER_INDEX 0
 #define PIC_LOWER_INDEX 1
 
-struct cpu_hw_counters {
-struct perf_counter *counters[MAX_HWCOUNTERS];
-unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
-unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
+struct cpu_hw_events {
+struct perf_event *events[MAX_HWEVENTS];
+unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
 int enabled;
 };
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 struct perf_event_map {
 u16 encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 
-static const struct perf_event_map *ultra3i_event_map(int event)
+static const struct perf_event_map *ultra3i_event_map(int event_id)
 {
-return &ultra3i_perfmon_event_map[event];
+return &ultra3i_perfmon_event_map[event_id];
 }
 
 static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
 };
 
-static const struct perf_event_map *niagara2_event_map(int event)
+static const struct perf_event_map *niagara2_event_map(int event_id)
 {
-return &niagara2_perfmon_event_map[event];
+return &niagara2_perfmon_event_map[event_id];
 }
 
 static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
-static u64 event_encoding(u64 event, int idx)
+static u64 event_encoding(u64 event_id, int idx)
 {
 if (idx == PIC_UPPER_INDEX)
-event <<= sparc_pmu->upper_shift;
+event_id <<= sparc_pmu->upper_shift;
 else
-event <<= sparc_pmu->lower_shift;
-return event;
+event_id <<= sparc_pmu->lower_shift;
+return event_id;
 }
 
 static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
 sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
 int idx)
 {
 u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
 pcr_ops->write((val & ~mask) | hwc->config);
 }
 
-static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
 int idx)
 {
 u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
 
 void hw_perf_enable(void)
 {
-struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 u64 val;
 int i;
 
@@ -184,9 +184,9 @@ void hw_perf_enable(void)
 
 val = pcr_ops->read();
 
-for (i = 0; i < MAX_HWCOUNTERS; i++) {
-struct perf_counter *cp = cpuc->counters[i];
-struct hw_perf_counter *hwc;
+for (i = 0; i < MAX_HWEVENTS; i++) {
+struct perf_event *cp = cpuc->events[i];
+struct hw_perf_event *hwc;
 
 if (!cp)
 continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)
 
 void hw_perf_disable(void)
 {
-struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 u64 val;
 
 if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
 write_pic(pic);
 }
 
-static int sparc_perf_counter_set_period(struct perf_counter *counter,
-struct hw_perf_counter *hwc, int idx)
+static int sparc_perf_event_set_period(struct perf_event *event,
+struct hw_perf_event *hwc, int idx)
 {
 s64 left = atomic64_read(&hwc->period_left);
 s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,
 
 write_pmc(idx, (u64)(-left) & 0xffffffff);
 
-perf_counter_update_userpage(counter);
+perf_event_update_userpage(event);
 
 return ret;
 }
 
-static int sparc_pmu_enable(struct perf_counter *counter)
+static int sparc_pmu_enable(struct perf_event *event)
 {
-struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-struct hw_perf_counter *hwc = &counter->hw;
+struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+struct hw_perf_event *hwc = &event->hw;
 int idx = hwc->idx;
 
 if (test_and_set_bit(idx, cpuc->used_mask))
 return -EAGAIN;
 
-sparc_pmu_disable_counter(hwc, idx);
+sparc_pmu_disable_event(hwc, idx);
 
-cpuc->counters[idx] = counter;
+cpuc->events[idx] = event;
 set_bit(idx, cpuc->active_mask);
 
-sparc_perf_counter_set_period(counter, hwc, idx);
-sparc_pmu_enable_counter(hwc, idx);
-perf_counter_update_userpage(counter);
+sparc_perf_event_set_period(event, hwc, idx);
+sparc_pmu_enable_event(hwc, idx);
+perf_event_update_userpage(event);
 return 0;
 }
 
-static u64 sparc_perf_counter_update(struct perf_counter *counter,
-struct hw_perf_counter *hwc, int idx)
+static u64 sparc_perf_event_update(struct perf_event *event,
+struct hw_perf_event *hwc, int idx)
 {
 int shift = 64 - 32;
 u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ static u64 sparc_perf_counter_update(struct perf_counter *counter,
 delta = (new_raw_count << shift) - (prev_raw_count << shift);
 delta >>= shift;
 
-atomic64_add(delta, &counter->count);
+atomic64_add(delta, &event->count);
 atomic64_sub(delta, &hwc->period_left);
 
 return new_raw_count;
 }
 
-static void sparc_pmu_disable(struct perf_counter *counter)
+static void sparc_pmu_disable(struct perf_event *event)
 {
-struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-struct hw_perf_counter *hwc = &counter->hw;
+struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+struct hw_perf_event *hwc = &event->hw;
 int idx = hwc->idx;
 
 clear_bit(idx, cpuc->active_mask);
-sparc_pmu_disable_counter(hwc, idx);
+sparc_pmu_disable_event(hwc, idx);
 
 barrier();
 
-sparc_perf_counter_update(counter, hwc, idx);
-cpuc->counters[idx] = NULL;
+sparc_perf_event_update(event, hwc, idx);
+cpuc->events[idx] = NULL;
 clear_bit(idx, cpuc->used_mask);
 
-perf_counter_update_userpage(counter);
+perf_event_update_userpage(event);
 }
 
-static void sparc_pmu_read(struct perf_counter *counter)
+static void sparc_pmu_read(struct perf_event *event)
 {
-struct hw_perf_counter *hwc = &counter->hw;
-sparc_perf_counter_update(counter, hwc, hwc->idx);
+struct hw_perf_event *hwc = &event->hw;
+sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_counter *counter)
+static void sparc_pmu_unthrottle(struct perf_event *event)
 {
-struct hw_perf_counter *hwc = &counter->hw;
-sparc_pmu_enable_counter(hwc, hwc->idx);
+struct hw_perf_event *hwc = &event->hw;
+sparc_pmu_enable_event(hwc, hwc->idx);
 }
 
-static atomic_t active_counters = ATOMIC_INIT(0);
+static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
-void perf_counter_grab_pmc(void)
+void perf_event_grab_pmc(void)
 {
-if (atomic_inc_not_zero(&active_counters))
+if (atomic_inc_not_zero(&active_events))
 return;
 
 mutex_lock(&pmc_grab_mutex);
-if (atomic_read(&active_counters) == 0) {
+if (atomic_read(&active_events) == 0) {
 if (atomic_read(&nmi_active) > 0) {
 on_each_cpu(stop_nmi_watchdog, NULL, 1);
 BUG_ON(atomic_read(&nmi_active) != 0);
 }
-atomic_inc(&active_counters);
+atomic_inc(&active_events);
 }
 mutex_unlock(&pmc_grab_mutex);
 }
 
-void perf_counter_release_pmc(void)
+void perf_event_release_pmc(void)
 {
-if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
+if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 if (atomic_read(&nmi_active) == 0)
 on_each_cpu(start_nmi_watchdog, NULL, 1);
 mutex_unlock(&pmc_grab_mutex);
 }
 }
 
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
 {
-perf_counter_release_pmc();
+perf_event_release_pmc();
 }
 
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
 {
-struct perf_counter_attr *attr = &counter->attr;
-struct hw_perf_counter *hwc = &counter->hw;
+struct perf_event_attr *attr = &event->attr;
+struct hw_perf_event *hwc = &event->hw;
 const struct perf_event_map *pmap;
 u64 enc;
 
@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 if (attr->config >= sparc_pmu->max_events)
 return -EINVAL;
 
-perf_counter_grab_pmc();
-counter->destroy = hw_perf_counter_destroy;
+perf_event_grab_pmc();
+event->destroy = hw_perf_event_destroy;
 
 /* We save the enable bits in the config_base. So to
 * turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
 .unthrottle = sparc_pmu_unthrottle,
 };
 
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
-int err = __hw_perf_counter_init(counter);
+int err = __hw_perf_event_init(event);
 
 if (err)
 return ERR_PTR(err);
 return &pmu;
 }
 
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
 {
 unsigned long flags;
 u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
 local_irq_restore(flags);
 }
 
-static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
+static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 unsigned long cmd, void *__args)
 {
 struct die_args *args = __args;
 struct perf_sample_data data;
-struct cpu_hw_counters *cpuc;
+struct cpu_hw_events *cpuc;
 struct pt_regs *regs;
 int idx;
 
-if (!atomic_read(&active_counters))
+if (!atomic_read(&active_events))
 return NOTIFY_DONE;
 
 switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
 
 data.addr = 0;
 
-cpuc = &__get_cpu_var(cpu_hw_counters);
-for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
-struct perf_counter *counter = cpuc->counters[idx];
-struct hw_perf_counter *hwc;
+cpuc = &__get_cpu_var(cpu_hw_events);
+for (idx = 0; idx < MAX_HWEVENTS; idx++) {
+struct perf_event *event = cpuc->events[idx];
+struct hw_perf_event *hwc;
 u64 val;
 
 if (!test_bit(idx, cpuc->active_mask))
 continue;
-hwc = &counter->hw;
-val = sparc_perf_counter_update(counter, hwc, idx);
+hwc = &event->hw;
+val = sparc_perf_event_update(event, hwc, idx);
 if (val & (1ULL << 31))
 continue;
 
-data.period = counter->hw.last_period;
-if (!sparc_perf_counter_set_period(counter, hwc, idx))
+data.period = event->hw.last_period;
+if (!sparc_perf_event_set_period(event, hwc, idx))
 continue;
 
-if (perf_counter_overflow(counter, 1, &data, regs))
-sparc_pmu_disable_counter(hwc, idx);
+if (perf_event_overflow(event, 1, &data, regs))
+sparc_pmu_disable_event(hwc, idx);
 }
 
 return NOTIFY_STOP;
 }
 
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
-.notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+.notifier_call = perf_event_nmi_handler,
 };
 
 static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
 return false;
 }
 
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
 {
-pr_info("Performance counters: ");
+pr_info("Performance events: ");
 
 if (!supported_pmu()) {
 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)
 
 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-/* All sparc64 PMUs currently have 2 counters. But this simple
- * driver only supports one active counter at a time.
+/* All sparc64 PMUs currently have 2 events. But this simple
+ * driver only supports one active event at a time.
 */
-perf_max_counters = 1;
+perf_max_events = 1;
 
-register_die_notifier(&perf_counter_nmi_notifier);
+register_die_notifier(&perf_event_nmi_notifier);
 }

@@ -82,5 +82,5 @@ sys_call_table:
 /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
 

@@ -83,7 +83,7 @@ sys_call_table32:
 /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
+.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
 
 #endif /* CONFIG_COMPAT */
 

@@ -158,4 +158,4 @@ sys_call_table:
 /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open

@@ -24,7 +24,7 @@ config X86
 select HAVE_UNSTABLE_SCHED_CLOCK
 select HAVE_IDE
 select HAVE_OPROFILE
-select HAVE_PERF_COUNTERS if (!M386 && !M486)
+select HAVE_PERF_EVENTS if (!M386 && !M486)
 select HAVE_IOREMAP_PROT
 select HAVE_KPROBES
 select ARCH_WANT_OPTIONAL_GPIOLIB

@@ -831,5 +831,5 @@ ia32_sys_call_table:
 .quad compat_sys_preadv
 .quad compat_sys_pwritev
 .quad compat_sys_rt_tgsigqueueinfo /* 335 */
-.quad sys_perf_counter_open
+.quad sys_perf_event_open
 ia32_syscall_end:

@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 #endif
 

@@ -1,8 +1,8 @@
-#ifndef _ASM_X86_PERF_COUNTER_H
-#define _ASM_X86_PERF_COUNTER_H
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H
 
 /*
- * Performance counter hw details:
+ * Performance event hw details:
 */
 
 #define X86_PMC_MAX_GENERIC 8
@@ -43,7 +43,7 @@
 union cpuid10_eax {
 struct {
 unsigned int version_id:8;
-unsigned int num_counters:8;
+unsigned int num_events:8;
 unsigned int bit_width:8;
 unsigned int mask_length:8;
 } split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
 
 union cpuid10_edx {
 struct {
-unsigned int num_counters_fixed:4;
+unsigned int num_events_fixed:4;
 unsigned int reserved:28;
 } split;
 unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
 
 
 /*
- * Fixed-purpose performance counters:
+ * Fixed-purpose performance events:
 */
 
 /*
@@ -87,22 +87,22 @@ union cpuid10_edx {
 /*
 * We model BTS tracing as another fixed-mode PMC.
 *
- * We choose a value in the middle of the fixed counter range, since lower
- * values are used by actual fixed counters and higher values are used
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
 #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
 
 
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-extern void perf_counters_lapic_init(void);
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+extern void perf_events_lapic_init(void);
 
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
 
 #else
-static inline void init_hw_perf_counters(void) { }
-static inline void perf_counters_lapic_init(void) { }
+static inline void init_hw_perf_events(void) { }
+static inline void perf_events_lapic_init(void) { }
 #endif
 
-#endif /* _ASM_X86_PERF_COUNTER_H */
+#endif /* _ASM_X86_PERF_EVENT_H */

@@ -341,7 +341,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_counter_open 336
#define __NR_perf_event_open 336

#ifdef __KERNEL__

@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
__SYSCALL(__NR_pwritev, sys_pwritev)
#define __NR_rt_tgsigqueueinfo 297
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
#define __NR_perf_counter_open 298
__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
#define __NR_perf_event_open 298
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)

#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
@@ -14,7 +14,7 @@
 * Mikael Pettersson : PM converted to driver model.
 */

#include <linux/perf_counter.h>
#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>

@@ -35,7 +35,7 @@
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/perf_counter.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>

@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
		apic_write(APIC_ESR, 0);
	}
#endif
	perf_counters_lapic_init();
	perf_events_lapic_init();

	preempt_disable();
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o

obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o

obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/

@@ -13,7 +13,7 @@
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_counter.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>

@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
#else
	vgetcpu_set_mode();
#endif
	init_hw_perf_counters();
	init_hw_perf_events();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
File diff suppressed because it is too large
@@ -20,7 +20,7 @@
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_counter.h>
#include <asm/perf_event.h>

struct nmi_watchdog_ctlblk {
	unsigned int cccr_msr;

@@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_PERF_COUNTERS
#ifdef CONFIG_PERF_EVENTS
apicinterrupt LOCAL_PENDING_VECTOR \
	perf_pending_interrupt smp_perf_pending_interrupt
#endif

@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* Performance monitoring interrupts: */
# ifdef CONFIG_PERF_COUNTERS
# ifdef CONFIG_PERF_EVENTS
	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
	.long sys_preadv
	.long sys_pwritev
	.long sys_rt_tgsigqueueinfo /* 335 */
	.long sys_perf_counter_open
	.long sys_perf_event_open
@@ -10,7 +10,7 @@
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_counter.h> /* perf_swcounter_event */
#include <linux/perf_event.h> /* perf_sw_event */

#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */

@@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running

@@ -1114,11 +1114,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				regs, address);
	}
@@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void)
	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
			current_cpu_data.x86_model == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.num_events = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_counters;
	num_counters = eax.split.num_events;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;

@@ -13,7 +13,7 @@
#define OP_X86_MODEL_H

#include <asm/types.h>
#include <asm/perf_counter.h>
#include <asm/perf_event.h>

struct op_msr {
	unsigned long addr;
@@ -26,7 +26,7 @@
#include <linux/proc_fs.h>
#include <linux/nmi.h>
#include <linux/quotaops.h>
#include <linux/perf_counter.h>
#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>

@@ -252,7 +252,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
	struct pt_regs *regs = get_irq_regs();
	if (regs)
		show_regs(regs);
	perf_counter_print_debug();
	perf_event_print_debug();
}
static struct sysrq_key_op sysrq_showregs_op = {
	.handler = sysrq_handle_showregs,
@@ -33,7 +33,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_counter.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>

@@ -923,7 +923,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_counter_comm(tsk);
	perf_event_comm(tsk);
}

int flush_old_exec(struct linux_binprm * bprm)

@@ -997,7 +997,7 @@ int flush_old_exec(struct linux_binprm * bprm)
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_counter_exit_task(current);
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */
@@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages)

#define __NR_rt_tgsigqueueinfo 240
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
#define __NR_perf_counter_open 241
__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
#define __NR_perf_event_open 241
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)

#undef __NR_syscalls
#define __NR_syscalls 242
@@ -106,13 +106,13 @@ extern struct group_info init_groups;

extern struct cred init_cred;

#ifdef CONFIG_PERF_COUNTERS
# define INIT_PERF_COUNTERS(tsk) \
	.perf_counter_mutex = \
		__MUTEX_INITIALIZER(tsk.perf_counter_mutex), \
	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
	.perf_event_mutex = \
		__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_COUNTERS(tsk)
# define INIT_PERF_EVENTS(tsk)
#endif

/*

@@ -178,7 +178,7 @@ extern struct cred init_cred;
	}, \
	.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
	INIT_IDS \
	INIT_PERF_COUNTERS(tsk) \
	INIT_PERF_EVENTS(tsk) \
	INIT_TRACE_IRQFLAGS \
	INIT_LOCKDEP \
	INIT_FTRACE_GRAPH \
@@ -1,5 +1,9 @@
/*
 * Performance counters:
 * NOTE: this file will be removed in a future kernel release, it is
 * provided as a courtesy copy of user-space code that relies on the
 * old (pre-rename) symbols and constants.
 *
 * Performance events:
 *
 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -131,19 +135,19 @@ enum perf_counter_sample_format {
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64 value;
 *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_RUNNING
 *	  { u64 id; } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_RUNNING
 *	  { u64 value;
 *	    { u64 id; } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_counter_read_format {
@@ -314,9 +318,9 @@ enum perf_event_type {

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64 id;
	 *	u64 lost;
	 * };
	 */
	PERF_EVENT_LOST = 2,
@@ -364,10 +368,10 @@ enum perf_event_type {

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, tid;
	 *
	 *	struct read_format values;
	 * };
	 */
	PERF_EVENT_READ = 8,
@@ -383,23 +387,23 @@ enum perf_event_type {
	 *	{ u64 id; } && PERF_SAMPLE_ID
	 *	{ u64 stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64 period; } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64 nr,
	 *	  u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32 size;
	 *	  char data[size]; } && PERF_SAMPLE_RAW
@@ -422,437 +426,16 @@ enum perf_callchain_context {
	PERF_CONTEXT_MAX = (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH 255

struct perf_callchain_entry {
	__u64 nr;
	__u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32 size;
	void *data;
};

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64 config;
			unsigned long config_base;
			unsigned long counter_base;
			int idx;
		};
		union { /* software */
			atomic64_t count;
			struct hrtimer hrtimer;
		};
	};
	atomic64_t prev_count;
	u64 sample_period;
	u64 last_period;
	atomic64_t period_left;
	u64 interrupts;

	u64 freq_count;
	u64 freq_interrupts;
	u64 freq_stamp;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable) (struct perf_counter *counter);
	void (*disable) (struct perf_counter *counter);
	void (*read) (struct perf_counter *counter);
	void (*unthrottle) (struct perf_counter *counter);
};

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR = -2,
	PERF_COUNTER_STATE_OFF = -1,
	PERF_COUNTER_STATE_INACTIVE = 0,
	PERF_COUNTER_STATE_ACTIVE = 1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head rcu_head;
	int nr_pages; /* nr of data pages */
	int writable; /* are we writable */
	int nr_locked; /* nr pages mlocked */

	atomic_t poll; /* POLL_ for wakeups */
	atomic_t events; /* event limit */

	atomic_long_t head; /* write position */
	atomic_long_t done_head; /* completed head */

	atomic_t lock; /* concurrent writes */
	atomic_t wakeup; /* needs a wakeup */
	atomic_t lost; /* nr records lost */

	long watermark; /* wakeup watermark */

	struct perf_counter_mmap_page *user_page;
	void *data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head list_entry;
	struct list_head event_entry;
	struct list_head sibling_list;
	int nr_siblings;
	struct perf_counter *group_leader;
	struct perf_counter *output;
	const struct pmu *pmu;

	enum perf_counter_active_state state;
	atomic64_t count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64 total_time_enabled;
	u64 total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;

	struct perf_counter_attr attr;
	struct hw_perf_counter hw;

	struct perf_counter_context *ctx;
	struct file *filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t child_total_time_enabled;
	atomic64_t child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex child_mutex;
	struct list_head child_list;
	struct perf_counter *parent;

	int oncpu;
	int cpu;

	struct list_head owner_entry;
	struct task_struct *owner;

	/* mmap bits */
	struct mutex mmap_mutex;
	atomic_t mmap_count;
	struct perf_mmap_data *data;

	/* poll related */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;

	/* delayed work for NMIs and such */
	int pending_wakeup;
	int pending_kill;
	int pending_disable;
	struct perf_pending_entry pending;

	atomic_t event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head rcu_head;

	struct pid_namespace *ns;
	u64 id;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex mutex;

	struct list_head counter_list;
	struct list_head event_list;
	int nr_counters;
	int nr_active;
	int is_active;
	int nr_stat;
	atomic_t refcount;
	struct task_struct *task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64 time;
	u64 timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_counter_context *parent_ctx;
	u64 parent_gen;
	u64 generation;
	int pin_count;
	struct rcu_head rcu_head;
};

/**
 * struct perf_counter_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context ctx;
	struct perf_counter_context *task_ctx;
	int active_oncpu;
	int max_pertask;
	int exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int recursion[4];
};

struct perf_output_handle {
	struct perf_counter *counter;
	struct perf_mmap_data *data;
	unsigned long head;
	unsigned long offset;
	int nmi;
	int sample;
	int locked;
	unsigned long flags;
};

#ifdef CONFIG_PERF_COUNTERS
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)

/*
 * Set by architecture code:
 * In case some app still references the old symbols:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
#define __NR_perf_counter_open __NR_perf_event_open

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
				struct perf_cpu_context *cpuctx,
				struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);
#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE

struct perf_sample_data {
	u64 type;

	u64 ip;
	struct {
		u32 pid;
		u32 tid;
	} tid_entry;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	struct {
		u32 cpu;
		u32 reserved;
	} cpu_entry;
	u64 period;
	struct perf_callchain_entry *callchain;
	struct perf_raw_record *raw;
};

extern void perf_output_sample(struct perf_output_handle *handle,
				struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_counter *counter);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_counter *counter,
				struct pt_regs *regs);

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return (counter->attr.type != PERF_TYPE_RAW) &&
		(counter->attr.type != PERF_TYPE_HARDWARE) &&
		(counter->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swcounter_enabled[event]))
		__perf_swcounter_event(event, nr, nmi, regs, addr);
}

extern void __perf_counter_mmap(struct vm_area_struct *vma);

static inline void perf_counter_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_counter_mmap(vma);
}

extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_paranoid;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_sample_rate;

extern void perf_counter_init(void);
extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
				void *record, int entry_size);

#ifndef perf_misc_flags
#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
				PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			struct perf_counter *counter, unsigned int size,
			int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			const void *buf, unsigned int len);
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
			struct task_struct *next, int cpu) { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu) { }
static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
static inline void perf_counter_exit_task(struct task_struct *child) { }
static inline void perf_counter_free_task(struct task_struct *task) { }
static inline void perf_counter_do_pending(void) { }
static inline void perf_counter_print_debug(void) { }
static inline void perf_disable(void) { }
static inline void perf_enable(void) { }
static inline int perf_counter_task_disable(void) { return -EINVAL; }
static inline int perf_counter_task_enable(void) { return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
			struct pt_regs *regs, u64 addr) { }

static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }

#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */
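The courtesy copy above keeps pre-rename user-space source building: the old syscall and prctl spellings are now plain aliases for the renamed ones. A minimal sketch of what that preserves follows; the legacy_open() helper is hypothetical, and syscall(2) is used because no libc wrapper existed at the time.

#include <linux/perf_counter.h>	/* courtesy copy: old names are aliases */
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical legacy caller: __NR_perf_counter_open now expands to
 * __NR_perf_event_open, so the old spelling still reaches the syscall. */
long legacy_open(struct perf_counter_attr *attr, pid_t pid, int cpu)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, -1, 0);
}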
include/linux/perf_event.h (new file, 858 lines)
@@ -0,0 +1,858 @@
/*
 * Performance events:
 *
 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE = 0,
	PERF_TYPE_SOFTWARE = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE = 3,
	PERF_TYPE_RAW = 4,

	PERF_TYPE_MAX, /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES = 0,
	PERF_COUNT_HW_INSTRUCTIONS = 1,
	PERF_COUNT_HW_CACHE_REFERENCES = 2,
	PERF_COUNT_HW_CACHE_MISSES = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
	PERF_COUNT_HW_BRANCH_MISSES = 5,
	PERF_COUNT_HW_BUS_CYCLES = 6,

	PERF_COUNT_HW_MAX, /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 * { read, write, prefetch } x
 * { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D = 0,
	PERF_COUNT_HW_CACHE_L1I = 1,
	PERF_COUNT_HW_CACHE_LL = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU = 5,

	PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};
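For PERF_TYPE_HW_CACHE, the three enums above are byte-packed into attr.config. A sketch, assuming the packing implemented in kernel/perf_event.c (the packing itself is not part of this header, and hw_cache_config() is an illustrative name):

static __u64 hw_cache_config(enum perf_hw_cache_id cache,
			enum perf_hw_cache_op_id op,
			enum perf_hw_cache_op_result_id result)
{
	/* cache id in byte 0, operation in byte 1, result in byte 2 */
	return (__u64)cache | ((__u64)op << 8) | ((__u64)result << 16);
}

L1-D read misses, for example, would be hw_cache_config(PERF_COUNT_HW_CACHE_L1D, PERF_COUNT_HW_CACHE_OP_READ, PERF_COUNT_HW_CACHE_RESULT_MISS).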

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK = 0,
	PERF_COUNT_SW_TASK_CLOCK = 1,
	PERF_COUNT_SW_PAGE_FAULTS = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,

	PERF_COUNT_SW_MAX, /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP = 1U << 0,
	PERF_SAMPLE_TID = 1U << 1,
	PERF_SAMPLE_TIME = 1U << 2,
	PERF_SAMPLE_ADDR = 1U << 3,
	PERF_SAMPLE_READ = 1U << 4,
	PERF_SAMPLE_CALLCHAIN = 1U << 5,
	PERF_SAMPLE_ID = 1U << 6,
	PERF_SAMPLE_CPU = 1U << 7,
	PERF_SAMPLE_PERIOD = 1U << 8,
	PERF_SAMPLE_STREAM_ID = 1U << 9,
	PERF_SAMPLE_RAW = 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
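A sketch of how a sampling consumer might combine these bits; it fills the struct perf_event_attr defined further down, and the helper name is illustrative:

static void request_samples(struct perf_event_attr *attr)
{
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr->freq = 1;			/* interpret sample_freq, not sample_period */
	attr->sample_freq = 1000;	/* target roughly 1000 samples/sec */
}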
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64 value;
 *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_RUNNING
 *	  { u64 id; } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_RUNNING
 *	  { u64 value;
 *	    { u64 id; } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID = 1U << 2,
	PERF_FORMAT_GROUP = 1U << 3,

	PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
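A sketch of reading one non-group event under the !PERF_FORMAT_GROUP layout above, with both time fields requested; struct read_buf and read_scaled() are illustrative names:

#include <unistd.h>
#include <linux/types.h>

struct read_buf {
	__u64 value;
	__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static int read_scaled(int fd, __u64 *count)
{
	struct read_buf rb;

	if (read(fd, &rb, sizeof(rb)) != sizeof(rb))
		return -1;
	/* compensate for multiplexing: scale value by enabled/running time */
	*count = rb.time_running ?
		(__u64)((double)rb.value * rb.time_enabled / rb.time_running) :
		rb.value;
	return 0;
}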
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32 type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32 size;

	/*
	 * Type specific configuration information.
	 */
	__u64 config;

	union {
		__u64 sample_period;
		__u64 sample_freq;
	};

	__u64 sample_type;
	__u64 read_format;

	__u64 disabled : 1, /* off by default */
		inherit : 1, /* children inherit it */
		pinned : 1, /* must always be on PMU */
		exclusive : 1, /* only group on PMU */
		exclude_user : 1, /* don't count user */
		exclude_kernel : 1, /* ditto kernel */
		exclude_hv : 1, /* ditto hypervisor */
		exclude_idle : 1, /* don't count when idle */
		mmap : 1, /* include mmap data */
		comm : 1, /* include comm data */
		freq : 1, /* use freq, not period */
		inherit_stat : 1, /* per task counts */
		enable_on_exec : 1, /* next exec enables */
		task : 1, /* trace fork/exit */
		watermark : 1, /* wakeup_watermark */

		__reserved_1 : 49;

	union {
		__u32 wakeup_events; /* wakeup every n events */
		__u32 wakeup_watermark; /* bytes before wakeup */
	};
	__u32 __reserved_2;

	__u64 __reserved_3;
};
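A sketch of filling the structure and opening an event on the renamed syscall; there is no libc wrapper, so syscall(2) is invoked directly. It assumes the new header is installed, and open_insn_counter() is an illustrative name.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Count user-space instructions in the calling thread. */
static long open_insn_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);		/* PERF_ATTR_SIZE_VER0 layout */
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;			/* start off, enable via ioctl */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* (attr, pid = 0: this thread, cpu = -1: any, group_fd = -1, flags) */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}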
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,
};
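A sketch of driving an event fd through these ioctls; passing PERF_IOC_FLAG_GROUP as the ioctl argument would apply the operation to the whole group instead of the single event. The measure() helper is illustrative.

#include <sys/ioctl.h>

static void measure(int fd, void (*workload)(void))
{
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	workload();		/* the code being measured */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
}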
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32 version; /* version number of this structure */
	__u32 compat_version; /* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *	u32 seq;
	 *	s64 count;
	 *
	 *	do {
	 *		seq = pc->lock;
	 *
	 *		barrier()
	 *		if (pc->index) {
	 *			count = pmc_read(pc->index - 1);
	 *			count += pc->offset;
	 *		} else
	 *			goto regular_read;
	 *
	 *		barrier();
	 *	} while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 * processes.
	 */
	__u32 lock; /* seqlock for synchronization */
	__u32 index; /* hardware event identifier */
	__s64 offset; /* add to hardware event value */
	__u64 time_enabled; /* time event active */
	__u64 time_running; /* time event on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64 __reserved[123]; /* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64 data_head; /* head in the data section */
	__u64 data_tail; /* user-space written tail */
};
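The loop documented above, materialized as a sketch for x86 self-monitoring: the pmc_read() placeholder becomes the rdpmc instruction and barrier() a compiler barrier (both x86-specific assumptions), and pc must point at the event's mmap()ed first page.

#include <linux/types.h>
#include <linux/perf_event.h>

static __u64 rdpmc(__u32 ecx)
{
	__u32 lo, hi;

	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ecx));
	return (__u64)hi << 32 | lo;
}

static int read_self(volatile struct perf_event_mmap_page *pc, __u64 *count)
{
	__u32 seq;

	do {
		seq = pc->lock;
		__asm__ __volatile__("" ::: "memory");	/* barrier() */
		if (!pc->index)
			return -1;	/* not on a hardware counter: use read() */
		*count = pc->offset + rdpmc(pc->index - 1);
		__asm__ __volatile__("" ::: "memory");
	} while (pc->lock != seq);

	return 0;
}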

#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)

struct perf_event_header {
	__u32 type;
	__u16 misc;
	__u16 size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32 pid, tid;
	 *	u64 addr;
	 *	u64 len;
	 *	u64 pgoff;
	 *	char filename[];
	 * };
	 */
	PERF_RECORD_MMAP = 1,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64 id;
	 *	u64 lost;
	 * };
	 */
	PERF_RECORD_LOST = 2,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32 pid, tid;
	 *	char comm[];
	 * };
	 */
	PERF_RECORD_COMM = 3,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, ppid;
	 *	u32 tid, ptid;
	 *	u64 time;
	 * };
	 */
	PERF_RECORD_EXIT = 4,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64 time;
	 *	u64 id;
	 *	u64 stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE = 5,
	PERF_RECORD_UNTHROTTLE = 6,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, ppid;
	 *	u32 tid, ptid;
	 *	{ u64 time; } && PERF_SAMPLE_TIME
	 * };
	 */
	PERF_RECORD_FORK = 7,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, tid;
	 *
	 *	struct read_format values;
	 * };
	 */
	PERF_RECORD_READ = 8,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	{ u64 ip; } && PERF_SAMPLE_IP
	 *	{ u32 pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64 time; } && PERF_SAMPLE_TIME
	 *	{ u64 addr; } && PERF_SAMPLE_ADDR
	 *	{ u64 id; } && PERF_SAMPLE_ID
	 *	{ u64 stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64 period; } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64 nr,
	 *	  u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32 size;
	 *	  char data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE = 9,

	PERF_RECORD_MAX, /* non-ABI */
};
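A sketch of draining PERF_RECORD_* records from the mmap()ed buffer under the data_head/data_tail protocol described earlier. It assumes a PROT_WRITE mapping (so the kernel honors data_tail), takes 'base'/'size' describing the data pages that follow the first control page, uses a compiler barrier as an x86 rmb() stand-in, and for brevity ignores records that wrap the ring edge.

#include <string.h>
#include <linux/perf_event.h>

static void drain(struct perf_event_mmap_page *pc, char *base, __u64 size)
{
	__u64 head = pc->data_head;

	__asm__ __volatile__("" ::: "memory");	/* rmb() stand-in, x86 only */
	while (pc->data_tail < head) {
		struct perf_event_header hdr;

		memcpy(&hdr, base + (pc->data_tail % size), sizeof(hdr));
		/* hdr.type is one of PERF_RECORD_*; hdr.size skips the body */
		pc->data_tail += hdr.size;
	}
}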
enum perf_callchain_context {
	PERF_CONTEXT_HV = (__u64)-32,
	PERF_CONTEXT_KERNEL = (__u64)-128,
	PERF_CONTEXT_USER = (__u64)-512,

	PERF_CONTEXT_GUEST = (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
	PERF_CONTEXT_GUEST_USER = (__u64)-2560,

	PERF_CONTEXT_MAX = (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH 255

struct perf_callchain_entry {
	__u64 nr;
	__u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32 size;
	void *data;
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64 config;
			unsigned long config_base;
			unsigned long event_base;
			int idx;
		};
		union { /* software */
			atomic64_t count;
			struct hrtimer hrtimer;
		};
	};
	atomic64_t prev_count;
	u64 sample_period;
	u64 last_period;
	atomic64_t period_left;
	u64 interrupts;

	u64 freq_count;
	u64 freq_interrupts;
	u64 freq_stamp;
#endif
};

struct perf_event;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable) (struct perf_event *event);
	void (*disable) (struct perf_event *event);
	void (*read) (struct perf_event *event);
	void (*unthrottle) (struct perf_event *event);
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR = -2,
	PERF_EVENT_STATE_OFF = -1,
	PERF_EVENT_STATE_INACTIVE = 0,
	PERF_EVENT_STATE_ACTIVE = 1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head rcu_head;
	int nr_pages; /* nr of data pages */
	int writable; /* are we writable */
	int nr_locked; /* nr pages mlocked */

	atomic_t poll; /* POLL_ for wakeups */
	atomic_t events; /* event_id limit */

	atomic_long_t head; /* write position */
	atomic_long_t done_head; /* completed head */

	atomic_t lock; /* concurrent writes */
	atomic_t wakeup; /* needs a wakeup */
	atomic_t lost; /* nr records lost */

	long watermark; /* wakeup watermark */

	struct perf_event_mmap_page *user_page;
	void *data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head group_entry;
	struct list_head event_entry;
	struct list_head sibling_list;
	int nr_siblings;
	struct perf_event *group_leader;
	struct perf_event *output;
	const struct pmu *pmu;

	enum perf_event_active_state state;
	atomic64_t count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64 total_time_enabled;
	u64 total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;

	struct perf_event_attr attr;
	struct hw_perf_event hw;

	struct perf_event_context *ctx;
	struct file *filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t child_total_time_enabled;
	atomic64_t child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex child_mutex;
	struct list_head child_list;
	struct perf_event *parent;

	int oncpu;
	int cpu;

	struct list_head owner_entry;
	struct task_struct *owner;

	/* mmap bits */
	struct mutex mmap_mutex;
	atomic_t mmap_count;
	struct perf_mmap_data *data;

	/* poll related */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;

	/* delayed work for NMIs and such */
	int pending_wakeup;
	int pending_kill;
	int pending_disable;
	struct perf_pending_entry pending;

	atomic_t event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head rcu_head;

	struct pid_namespace *ns;
	u64 id;
#endif
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	spinlock_t lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex mutex;

	struct list_head group_list;
	struct list_head event_list;
	int nr_events;
	int nr_active;
	int is_active;
	int nr_stat;
	atomic_t refcount;
	struct task_struct *task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64 time;
	u64 timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context *parent_ctx;
	u64 parent_gen;
	u64 generation;
	int pin_count;
	struct rcu_head rcu_head;
};

/**
 * struct perf_event_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context ctx;
	struct perf_event_context *task_ctx;
	int active_oncpu;
	int max_pertask;
	int exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int recursion[4];
};

struct perf_output_handle {
	struct perf_event *event;
	struct perf_mmap_data *data;
	unsigned long head;
	unsigned long offset;
	int nmi;
	int sample;
	int locked;
	unsigned long flags;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
				struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx, int cpu);
extern void perf_event_update_userpage(struct perf_event *event);

struct perf_sample_data {
	u64 type;

	u64 ip;
	struct {
		u32 pid;
		u32 tid;
	} tid_entry;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	struct {
		u32 cpu;
		u32 reserved;
	} cpu_entry;
	u64 period;
	struct perf_callchain_entry *callchain;
	struct perf_raw_record *raw;
};

extern void perf_output_sample(struct perf_output_handle *handle,
				struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return (event->attr.type != PERF_TYPE_RAW) &&
		(event->attr.type != PERF_TYPE_HARDWARE) &&
		(event->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, nmi, regs, addr);
}
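The arch/x86/mm/fault.c hunk earlier in this diff is the canonical caller of this helper; in kernel code the pattern is simply the following (the wrapper function is illustrative):

/* Charge one minor fault to the current task. */
static void account_minor_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, address);
}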

extern void __perf_event_mmap(struct vm_area_struct *vma);

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_event_mmap(vma);
}

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
			void *record, int entry_size);

#ifndef perf_misc_flags
#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
				PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			struct perf_event *event, unsigned int size,
			int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			const void *buf, unsigned int len);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			struct task_struct *next, int cpu) { }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_do_pending(void) { }
static inline void perf_event_print_debug(void) { }
static inline void perf_disable(void) { }
static inline void perf_enable(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
			struct pt_regs *regs, u64 addr) { }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }

#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */
@@ -85,7 +85,7 @@
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30

#define PR_TASK_PERF_COUNTERS_DISABLE 31
#define PR_TASK_PERF_COUNTERS_ENABLE 32
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32

#endif /* _LINUX_PRCTL_H */
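A sketch of the renamed task-wide switch from user space; whether <sys/prctl.h> or <linux/prctl.h> exposes the constants depends on the installed headers (an assumption here), and quiet_section() is an illustrative name.

#include <sys/prctl.h>
#include <linux/prctl.h>

static void quiet_section(void (*fn)(void))
{
	prctl(PR_TASK_PERF_EVENTS_DISABLE);	/* stop counting this task */
	fn();					/* work that should not be counted */
	prctl(PR_TASK_PERF_EVENTS_ENABLE);	/* resume counting */
}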
Some files were not shown because too many files have changed in this diff