perf, arch: Rework perf_event_index()
Put the logic to compute the event index into a per-pmu method. This is
required because the x86 rules are weird and wonderful and don't match
the capabilities of the current scheme. AFAIK only powerpc actually has
a usable userspace read of the PMCs, but I'm not at all sure anybody
actually used that. ARM is restored to the default since it currently
does not support userspace access at all. And all software events are
provided with a method that reports their index as 0 (disabled).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arun Sharma <asharma@fb.com>
Link: http://lkml.kernel.org/n/tip-dfydxodki16lylkt3gl2j7cw@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 35edc2a509 (parent 9a0f05cb36)
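The payoff of a meaningful index is userspace self-monitoring: a task can mmap its own event and, when perf_event_mmap_page::index is nonzero, read the hardware counter directly instead of going through read(). The sketch below is not part of this commit and makes assumptions beyond it: an x86 machine whose PMU actually publishes a nonzero index and permits RDPMC from user mode (x86 only grew that ability in follow-up patches), and no error handling.

/* Userspace self-monitoring sketch; see the assumptions stated above. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	uint64_t count = 0;
	uint32_t seq;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);

	do {				/* seqlock against concurrent kernel updates */
		seq = pc->lock;
		__sync_synchronize();
		if (pc->index)		/* 0 means no userspace read; fall back to read() */
			count = pc->offset + rdpmc(pc->index - 1);
		__sync_synchronize();
	} while (pc->lock != seq);

	printf("instructions: %llu\n", (unsigned long long)count);
	return 0;
}

A real reader would also sign-extend the raw counter value to the hardware counter width before adding pc->offset; that detail is elided here.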
@@ -12,10 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM performance counters start from 1 (in the cp15 accesses) so use the
- * same indexes here for consistency. */
-#define PERF_EVENT_INDEX_OFFSET 1
-
 /* ARM perf PMU IDs for use by internal perf clients. */
 enum arm_perf_pmu_ids {
 	ARM_PERF_PMU_ID_XSCALE1 = 0,
@@ -12,6 +12,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* _ASM_PERF_EVENT_H */
@@ -19,6 +19,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* _ASM_PERF_EVENT_H */
@@ -61,8 +61,6 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_EVENT_INDEX_OFFSET 1
-
 /*
  * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
@@ -1187,6 +1187,11 @@ static int power_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
+static int power_pmu_event_idx(struct perf_event *event)
+{
+	return event->hw.idx;
+}
+
 struct pmu power_pmu = {
 	.pmu_enable	= power_pmu_enable,
 	.pmu_disable	= power_pmu_disable,
@@ -1199,6 +1204,7 @@ struct pmu power_pmu = {
 	.start_txn	= power_pmu_start_txn,
 	.cancel_txn	= power_pmu_cancel_txn,
 	.commit_txn	= power_pmu_commit_txn,
+	.event_idx	= power_pmu_event_idx,
 };
 
 /*
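A note on what looks like an off-by-one against the default installed later in this patch: power_pmu_event_idx() returns event->hw.idx unadjusted, while perf_event_idx_default() returns event->hw.idx + 1. powerpc's hardware counters are numbered from 1, the same fact the deleted PERF_EVENT_INDEX_OFFSET of 1 used to encode, so index 0 stays free to mean "disabled" in either convention.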
@@ -6,4 +6,3 @@
 
 /* Empty, just to avoid compiling error */
 
-#define PERF_EVENT_INDEX_OFFSET 0
@@ -188,8 +188,6 @@ extern u32 get_ibs_caps(void);
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
-#define PERF_EVENT_INDEX_OFFSET 0
-
 /*
  * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
  * This flag is otherwise unused and ABI specified to be 0, so nobody should
@@ -680,6 +680,12 @@ struct pmu {
 	 * for each successful ->add() during the transaction.
 	 */
 	void (*cancel_txn)		(struct pmu *pmu); /* optional */
+
+	/*
+	 * Will return the value for perf_event_mmap_page::index for this event,
+	 * if no implementation is provided it will default to: event->hw.idx + 1.
+	 */
+	int (*event_idx)		(struct perf_event *event); /*optional */
 };
 
 /**
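For a PMU author the contract is deliberately small. A hypothetical driver (not from this patch; all names below are made up) whose counters are user-readable would wire it up as follows; leaving .event_idx NULL gets the event->hw.idx + 1 default filled in at perf_pmu_register() time, and returning 0, as the software PMUs further down do, marks the event as not readable from userspace:

/* Hypothetical example, assuming zero-based hardware counters that
 * userspace can read directly; report them 1-based so that 0 keeps
 * meaning "disabled" in perf_event_mmap_page::index. */
static int example_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx + 1;	/* identical to the default */
}

static struct pmu example_pmu = {
	/* .pmu_enable, .add, .del, .start, .stop, .read elided */
	.event_idx	= example_pmu_event_idx,
};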
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
 	return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
 	if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,7 +3216,7 @@ static int perf_event_index(struct perf_event *event)
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return 0;
 
-	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+	return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
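The value computed here is not consumed directly by callers: perf_event_update_userpage() stores it into perf_event_mmap_page::index whenever the event is scheduled in, which is where the userspace protocol sketched near the top picks it up. Note also that perf_event_index() still short-circuits to 0 for stopped or inactive events before the new per-pmu callback is ever consulted.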
@@ -4992,6 +4988,11 @@ static int perf_swevent_init(struct perf_event *event)
 	return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
 static struct pmu perf_swevent = {
 	.task_ctx_nr	= perf_sw_context,
 
@@ -5001,6 +5002,8 @@ static struct pmu perf_swevent = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5087,6 +5090,8 @@ static struct pmu perf_tracepoint = {
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5306,6 +5311,8 @@ static struct pmu perf_cpu_clock = {
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 /*
@@ -5378,6 +5385,8 @@ static struct pmu perf_task_clock = {
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5405,6 +5414,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
 	perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+	return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
@@ -5594,6 +5608,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->event_idx)
+		pmu->event_idx = perf_event_idx_default;
+
 	list_add_rcu(&pmu->entry, &pmus);
 	ret = 0;
 unlock:
@@ -613,6 +613,11 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
 	bp->hw.state = PERF_HES_STOPPED;
 }
 
+static int hw_breakpoint_event_idx(struct perf_event *bp)
+{
+	return 0;
+}
+
 static struct pmu perf_breakpoint = {
 	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */
 
@@ -622,6 +627,8 @@ static struct pmu perf_breakpoint = {
 	.start		= hw_breakpoint_start,
 	.stop		= hw_breakpoint_stop,
 	.read		= hw_breakpoint_pmu_read,
+
+	.event_idx	= hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)