KVM: PPC: Work around POWER7 DABR corruption problem
It turns out that on POWER7, writing to the DABR can cause a corrupted value to be written if the PMU is active and updating SDAR in continuous sampling mode. To work around this, we make sure that the PMU is inactive and SDAR updates are disabled (via MMCRA) when we are context-switching DABR.

When the guest sets DABR via the H_SET_DABR hypercall, we use a slightly different workaround, which is to read back the DABR and write it again if it got corrupted.

While we are at it, make it consistent that the saving and restoring of the guest's non-volatile GPRs and the FPRs are done with the guest setup of the PMU active.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
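As a rough orientation to the assembly changes below, the two workarounds can be sketched in C along the following lines. This is an illustrative sketch only, not code from this patch: the function names load_guest_dabr_quiesced() and set_dabr_checked() are invented here, while mtspr()/mfspr(), isync(), cpu_has_feature() and the SPRN_*/MMCR0_FC constants refer to the kernel's usual register-access and CPU-feature helpers (header locations approximate).

#include <asm/reg.h>       /* SPRN_*, MMCR0_FC, mtspr()/mfspr() */
#include <asm/synch.h>     /* isync() */
#include <asm/cputable.h>  /* cpu_has_feature(), CPU_FTR_ARCH_206 */

/*
 * Context-switch path: quiesce the PMU before touching DABR.
 * (In the real code the host MMCR0/MMCRA values are saved beforehand
 * so they can be restored on the way back to the host.)
 */
static void load_guest_dabr_quiesced(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_MMCR0, MMCR0_FC);		/* freeze all counters */
	if (cpu_has_feature(CPU_FTR_ARCH_206))
		mtspr(SPRN_MMCRA, 0);		/* stop SDAR updates on P7 */
	isync();

	mtspr(SPRN_DABRX, dabrx);
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_ARCH_206))
		isync();

	/* ... only now load the guest PMU setup, re-enabling counters ... */
}

/*
 * H_SET_DABR path: the PMU may still be running, so write the DABR,
 * read it back, and retry until the value sticks.
 */
static void set_dabr_checked(unsigned long dabr)
{
	do {
		mtspr(SPRN_DABR, dabr);
	} while (mfspr(SPRN_DABR) != dabr);
	isync();
}

The first path corresponds to the guest entry code, where the guest PMU state is loaded only after DABR has been written; the second corresponds to the kvmppc_h_set_dabr handler changed at the end of the diff.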
parent 7657f4089b
commit 8943633cf9

@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	rotldi	r10,r10,16
 	mtmsrd	r10,1
 
-	/* Save host PMU registers and load guest PMU registers */
+	/* Save host PMU registers */
 	/* R4 is live here (vcpu pointer) but not r3 or r5 */
 	li	r3, 1
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
 	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable interrupts */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r5, 0
+	mtspr	SPRN_MMCRA, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	isync
 	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
 	lbz	r5, LPPACA_PMCINUSE(r3)
 	cmpwi	r5, 0
 	beq	31f			/* skip if not */
 	mfspr	r5, SPRN_MMCR1
-	mfspr	r6, SPRN_MMCRA
 	std	r7, HSTATE_MMCR(r13)
 	std	r5, HSTATE_MMCR + 8(r13)
 	std	r6, HSTATE_MMCR + 16(r13)
@@ -159,24 +159,15 @@ kvmppc_hv_entry:
 	mflr	r0
 	std	r0, HSTATE_VMHANDLER(r13)
 
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	/* Set partition DABR */
+	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+	li	r5,3
+	ld	r6,VCPU_DABR(r4)
+	mtspr	SPRN_DABRX,r5
+	mtspr	SPRN_DABR,r6
+BEGIN_FTR_SECTION
+	isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Load guest PMU registers */
 	/* R4 is live here (vcpu pointer) */
@@ -215,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/* Load up FP, VMX and VSX registers */
 	bl	kvmppc_load_fp
 
+	ld	r14, VCPU_GPR(r14)(r4)
+	ld	r15, VCPU_GPR(r15)(r4)
+	ld	r16, VCPU_GPR(r16)(r4)
+	ld	r17, VCPU_GPR(r17)(r4)
+	ld	r18, VCPU_GPR(r18)(r4)
+	ld	r19, VCPU_GPR(r19)(r4)
+	ld	r20, VCPU_GPR(r20)(r4)
+	ld	r21, VCPU_GPR(r21)(r4)
+	ld	r22, VCPU_GPR(r22)(r4)
+	ld	r23, VCPU_GPR(r23)(r4)
+	ld	r24, VCPU_GPR(r24)(r4)
+	ld	r25, VCPU_GPR(r25)(r4)
+	ld	r26, VCPU_GPR(r26)(r4)
+	ld	r27, VCPU_GPR(r27)(r4)
+	ld	r28, VCPU_GPR(r28)(r4)
+	ld	r29, VCPU_GPR(r29)(r4)
+	ld	r30, VCPU_GPR(r30)(r4)
+	ld	r31, VCPU_GPR(r31)(r4)
+
 BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
 	ld	r5, VCPU_DSCR(r4)
@@ -256,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtspr	SPRN_DAR, r5
 	mtspr	SPRN_DSISR, r6
 
-	/* Set partition DABR */
-	li	r5,3
-	ld	r6,VCPU_DABR(r4)
-	mtspr	SPRN_DABRX,r5
-	mtspr	SPRN_DABR,r6
-
 BEGIN_FTR_SECTION
 	/* Restore AMR and UAMOR, set AMOR to all 1s */
 	ld	r5,VCPU_AMR(r4)
@@ -955,12 +959,6 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_AMR,r6
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
-	/* Restore host DABR and DABRX */
-	ld	r5,HSTATE_DABR(r13)
-	li	r6,7
-	mtspr	SPRN_DABR,r5
-	mtspr	SPRN_DABRX,r6
-
 	/* Switch DSCR back to host value */
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
@@ -999,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r5, VCPU_SPRG2(r9)
 	std	r6, VCPU_SPRG3(r9)
 
+	/* save FP state */
+	mr	r3, r9
+	bl	.kvmppc_save_fp
+
 	/* Increment yield count if they have a VPA */
 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
 	cmpdi	r8, 0
@@ -1013,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
 	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r7, 0
+	mtspr	SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	isync
 	beq	21f			/* if no VPA, save PMU stuff anyway */
 	lbz	r7, LPPACA_PMCINUSE(r8)
@@ -1021,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
-	mfspr	r6, SPRN_MMCRA
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
@@ -1046,17 +1053,20 @@ BEGIN_FTR_SECTION
 	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
-	/* save FP state */
-	mr	r3, r9
-	bl	.kvmppc_save_fp
 
 	/* Secondary threads go off to take a nap on POWER7 */
 BEGIN_FTR_SECTION
-	lwz	r0,VCPU_PTID(r3)
+	lwz	r0,VCPU_PTID(r9)
 	cmpwi	r0,0
 	bne	secondary_nap
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+	/* Restore host DABR and DABRX */
+	ld	r5,HSTATE_DABR(r13)
+	li	r6,7
+	mtspr	SPRN_DABR,r5
+	mtspr	SPRN_DABRX,r6
+
 	/*
 	 * Reload DEC.  HDEC interrupts were disabled when
 	 * we reloaded the host's LPCR value.
@@ -1393,7 +1403,12 @@ bounce_ext_interrupt:
 
 _GLOBAL(kvmppc_h_set_dabr)
 	std	r4,VCPU_DABR(r3)
-	mtspr	SPRN_DABR,r4
+	/* Work around P7 bug where DABR can get corrupted on mtspr */
+1:	mtspr	SPRN_DABR,r4
+	mfspr	r5, SPRN_DABR
+	cmpd	r4, r5
+	bne	1b
+	isync
 	li	r3,0
 	blr
 
@@ -1615,8 +1630,8 @@ kvm_no_guest:
  * r3 = vcpu pointer
  */
 _GLOBAL(kvmppc_save_fp)
-	mfmsr	r9
-	ori	r8,r9,MSR_FP
+	mfmsr	r5
+	ori	r8,r5,MSR_FP
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	oris	r8,r8,MSR_VEC@h
@@ -1665,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r3)
-	mtmsrd	r9
+	mtmsrd	r5
 	isync
 	blr
 