perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
If the last active performance counter has not overflowed at the moment an NMI is triggered by another counter, the irq statistics may miss an update. As a more serious consequence, the APIC quirk may not be triggered, so the APIC LVT entry stays masked.

Tested-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100805150917.GA6311@lenovo>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ef8f34aabf
commit 1c250d709f
@@ -656,6 +656,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int overflow;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -666,12 +667,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		WARN_ON_ONCE(hwc->idx != idx);
 
 		/* it might be unflagged overflow */
-		handled = p4_pmu_clear_cccr_ovf(hwc);
+		overflow = p4_pmu_clear_cccr_ovf(hwc);
 
 		val = x86_perf_event_update(event);
-		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+		if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
 			continue;
 
+		handled += overflow;
+
 		/* event overflow for sure */
 		data.period = event->hw.last_period;
 
@@ -687,7 +690,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled;
+	return handled > 0;
 }
 
 /*
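To make the effect of the change concrete: before this patch, handled was reassigned on every pass through the counter loop, so whichever active counter was scanned last decided the final value. If that counter happened not to overflow, an earlier overflow was forgotten and the code guarded by handled (the quirk and statistics update mentioned in the changelog) was skipped. The toy program below is purely illustrative: plain userspace C with made-up overflow flags and no kernel APIs, contrasting the old per-counter assignment with the new accumulation and the clamped return value.

#include <stdio.h>

/*
 * Simulated per-counter overflow flags for one NMI: counter 0 has
 * overflowed, counter 1 (the last one scanned) has not. The values
 * are made up purely to show the difference in bookkeeping.
 */
static const int overflowed[] = { 1, 0 };

int main(void)
{
	int handled_old = 0;	/* pre-patch: plain assignment per counter */
	int handled_new = 0;	/* post-patch: overflows are accumulated   */
	int ncounters = sizeof(overflowed) / sizeof(overflowed[0]);

	for (int idx = 0; idx < ncounters; idx++) {
		int overflow = overflowed[idx];

		handled_old = overflow;		/* last counter scanned wins        */
		handled_new += overflow;	/* every detected overflow counted  */
	}

	/*
	 * With the old scheme handled ends up 0 here, so code guarded by it
	 * (the LVT unmask quirk and the apic_perf_irqs update described in
	 * the changelog) would be skipped despite a real overflow.
	 */
	printf("old scheme: handled = %d\n", handled_old);	/* prints 0 */
	printf("new scheme: handled = %d, returns %d\n",
	       handled_new, handled_new > 0);			/* prints 1, 1 */
	return 0;
}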