Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc kernel fixes: a virtualization environment related fix, an uncore
  PMU driver removal handling fix, a PowerPC fix and new events for
  Knights Landing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Honour the CPUID for number of fixed counters in hypervisors
  perf/powerpc: Don't call perf_event_disable() from atomic context
  perf/core: Protect PMU device removal with a 'pmu_bus_running' check, to fix CONFIG_DEBUG_TEST_DRIVER_REMOVE=y kernel panic
  perf/x86/intel/cstate: Add C-state residency events for Knights Landing
commit b49c3170bf
arch/powerpc/kernel/hw_breakpoint.c

@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
 	if (!stepped) {
 		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
 			"0x%lx will be disabled.", info->address);
-		perf_event_disable(bp);
+		perf_event_disable_inatomic(bp);
 		goto out;
 	}
 	/*
arch/x86/events/intel/core.c

@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
 
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
-	 * assume at least 3 events:
+	 * assume at least 3 events, when not running in a hypervisor:
 	 */
-	if (version > 1)
-		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+	if (version > 1) {
+		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+		x86_pmu.num_counters_fixed =
+			max((int)edx.split.num_counters_fixed, assume);
+	}
 
 	if (boot_cpu_has(X86_FEATURE_PDCM)) {
 		u64 capabilities;
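For context on the hunk above: the number of fixed-purpose counters comes from CPUID leaf 0xA, which a hypervisor may legitimately report as zero. A minimal user-space sketch (not part of the patch; assumes a GCC/Clang toolchain providing <cpuid.h>, bit layout per the Intel SDM) that reads the same fields the PMU init path consumes:

/* cpuid_fixed_ctrs.c - query CPUID leaf 0xA for perfmon capabilities. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0xA not available\n");
		return 1;
	}

	printf("perfmon version:      %u\n", eax & 0xff);        /* EAX[7:0]  */
	printf("general-purpose ctrs: %u\n", (eax >> 8) & 0xff); /* EAX[15:8] */
	printf("fixed-function ctrs:  %u\n", edx & 0x1f);        /* EDX[4:0]  */
	return 0;
}

On bare metal this typically reports 3 fixed counters; under a hypervisor it can report 0, which the old quirk silently overrode to 3. The change above only applies that assumption when X86_FEATURE_HYPERVISOR is not set, so the hypervisor-provided CPUID value is honoured.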
arch/x86/events/intel/cstate.c

@@ -48,7 +48,8 @@
  *			       Scope: Core
  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *			       perf code: 0x02
- *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ *						SKL,KNL
  *			       Scope: Core
  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *			       perf code: 0x03
@@ -56,15 +57,16 @@
  *			       Scope: Core
  *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *			       perf code: 0x00
- *			       Available model: SNB,IVB,HSW,BDW,SKL
+ *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *			       perf code: 0x01
- *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *			       perf code: 0x02
- *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ *						SKL,KNL
  *			       Scope: Package (physical package)
  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *			       perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
 
 /* Quirk flags */
 #define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
+#define KNL_CORE_C6_MSR		(1UL << 1)
 
 struct perf_cstate_msr {
 	u64	msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
 	.quirks			= SLM_PKG_C6_USE_C7_MSR,
 };
 
+
+static const struct cstate_model knl_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES),
+	.quirks			= KNL_CORE_C6_MSR,
+};
+
+
+
 #define X86_CSTATES_MODEL(model, states)	\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
 
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+
+	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
 	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
 		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
 
+	/* KNL has different MSR for CORE C6 */
+	if (cm->quirks & KNL_CORE_C6_MSR)
+		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
+
+
 	has_cstate_core = cstate_probe_msr(cm->core_events,
 					   PERF_CSTATE_CORE_EVENT_MAX,
 					   core_msr, core_events_attrs);
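The cstate PMU added above essentially wraps rdmsr of the per-model residency MSRs. As an illustration only (hypothetical file name, not from the patch; requires root and the msr driver, e.g. "modprobe msr"), a small C program that reads one residency MSR through /dev/cpu/0/msr, with the MSR address passed in hex on the command line so that no specific constant such as MSR_KNL_CORE_C6_RESIDENCY is hard-coded here:

/* rdmsr_residency.c - read a residency MSR via the msr driver (illustrative). */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <msr-address-in-hex>\n", argv[0]);
		return 1;
	}

	off_t msr = (off_t)strtoull(argv[1], NULL, 16);

	int fd = open("/dev/cpu/0/msr", O_RDONLY);	/* needs root + msr module */
	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	uint64_t val;
	/* The msr driver maps the file offset to the MSR address. */
	if (pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}

	printf("MSR 0x%llx = %llu\n",
	       (unsigned long long)msr, (unsigned long long)val);
	close(fd);
	return 0;
}

Sampling the counter twice over an interval and taking the delta gives the residency; the cstate PMU does the same under the hood, with proper per-model event enumeration as in the tables above.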
include/linux/perf_event.h

@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
kernel/events/core.c

@@ -1960,6 +1960,12 @@ void perf_event_disable(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_disable);
 
+void perf_event_disable_inatomic(struct perf_event *event)
+{
+	event->pending_disable = 1;
+	irq_work_queue(&event->pending);
+}
+
 static void perf_set_shadow_time(struct perf_event *event,
 				 struct perf_event_context *ctx,
 				 u64 tstamp)
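perf_event_disable_inatomic() leans on irq_work: the caller may be in NMI or other atomic context (as in the PowerPC breakpoint handler above), so the actual disable is deferred via the event's pending irq_work and runs later from IRQ context. A stand-alone, module-style sketch of that deferral pattern (illustrative names, not code from this commit):

#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

static struct irq_work demo_work;

/* Runs later, from IRQ context, after the atomic caller has returned. */
static void demo_work_fn(struct irq_work *work)
{
	pr_info("deferred: now safe to do work that must not run atomically\n");
}

/* Safe to call from NMI or other atomic context: just queue and return. */
static void demo_defer(void)
{
	irq_work_queue(&demo_work);
}

static int __init demo_init(void)
{
	init_irq_work(&demo_work, demo_work_fn);
	demo_defer();
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the queued work has finished before unloading. */
	irq_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The next two hunks in the same file switch the overflow path over to this helper and harden PMU device removal.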
@@ -7075,8 +7081,8 @@ static int __perf_event_overflow(struct perf_event *event,
 	if (events && atomic_dec_and_test(&event->event_limit)) {
 		ret = 1;
 		event->pending_kill = POLL_HUP;
-		event->pending_disable = 1;
-		irq_work_queue(&event->pending);
+
+		perf_event_disable_inatomic(event);
 	}
 
 	READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -8855,7 +8861,10 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
+	int remove_device;
+
 	mutex_lock(&pmus_lock);
+	remove_device = pmu_bus_running;
 	list_del_rcu(&pmu->entry);
 	mutex_unlock(&pmus_lock);
 
@@ -8869,10 +8878,12 @@ void perf_pmu_unregister(struct pmu *pmu)
 	free_percpu(pmu->pmu_disable_count);
 	if (pmu->type >= PERF_TYPE_MAX)
 		idr_remove(&pmu_idr, pmu->type);
-	if (pmu->nr_addr_filters)
-		device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
-	device_del(pmu->dev);
-	put_device(pmu->dev);
+	if (remove_device) {
+		if (pmu->nr_addr_filters)
+			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+		device_del(pmu->dev);
+		put_device(pmu->dev);
+	}
 	free_pmu_context(pmu);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
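The perf_pmu_unregister() change makes teardown symmetric with setup: the sysfs device only exists if pmu_bus_running was set when the device was created, so removal has to be guarded by the same state, otherwise device_del()/put_device() run on a device that was never added — which is what CONFIG_DEBUG_TEST_DRIVER_REMOVE=y provokes by unregistering PMUs during early boot, before the perf bus is up. A self-contained toy model of that invariant (hypothetical names, plain user-space C, not kernel code):

/* pmu_teardown_demo.c - toy model of the pmu_bus_running guard (illustrative). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_pmu {
	const char *name;
	char *dev;		/* stands in for pmu->dev */
};

static bool bus_running;	/* stands in for pmu_bus_running */

static void toy_pmu_register(struct toy_pmu *pmu)
{
	/* Setup side: the device is only created once the bus is running. */
	if (bus_running)
		pmu->dev = malloc(1);
}

static void toy_pmu_unregister(struct toy_pmu *pmu)
{
	bool remove_device = bus_running;	/* snapshot under the lock, as in the fix */

	/* Teardown side: only undo what the setup side actually did. */
	if (remove_device) {
		free(pmu->dev);
		pmu->dev = NULL;
	}
	printf("%s unregistered (device %s)\n", pmu->name,
	       remove_device ? "removed" : "never existed");
}

int main(void)
{
	struct toy_pmu early = { .name = "early_pmu" };

	/* Register + unregister before the bus is up: the DEBUG_TEST_DRIVER_REMOVE case.
	 * In the kernel, the unguarded path would device_del() a device never added. */
	toy_pmu_register(&early);
	toy_pmu_unregister(&early);

	bus_running = true;

	struct toy_pmu late = { .name = "late_pmu" };
	toy_pmu_register(&late);
	toy_pmu_unregister(&late);
	return 0;
}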