KVM: PPC: e500: Save/restore SPE state
This is done lazily. The SPE save will be done only if the guest has used SPE since the last preemption or heavyweight exit. Restore will be done only on demand, when enabling MSR_SPE in the shadow MSR, in response to an SPE fault or mtmsr emulation.

For SPEFSCR, Linux already switches it on context switch (non-lazily), so the only remaining bit is to save it between qemu and the guest.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
commit 4cd35f675b
parent ecee273fc4
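For orientation, here is a toy model of the lazy switching scheme the patch implements, condensed from the booke.c hunks below. It is illustrative only: plain flags stand in for the real kvm_vcpu/MSR machinery, and the function names merely echo the ones the patch adds.

	/* Toy sketch of the lazy MSR[SPE] shadowing -- not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu {
		bool guest_msr_spe;   /* guest's virtual MSR[SPE] */
		bool shadow_msr_spe;  /* real MSR[SPE] while the guest runs */
	};

	/* stands in for kvmppc_vcpu_enable_spe(): load guest EVRs/ACC on demand */
	static void enable_spe(struct vcpu *v)
	{
		v->shadow_msr_spe = true;
		puts("restore guest SPE state");
	}

	/* stands in for kvmppc_vcpu_disable_spe(): save guest EVRs/ACC */
	static void disable_spe(struct vcpu *v)
	{
		v->shadow_msr_spe = false;
		puts("save guest SPE state");
	}

	/* mirrors kvmppc_vcpu_sync_spe(): only act when the two bits disagree */
	static void sync_spe(struct vcpu *v)
	{
		if (v->guest_msr_spe) {
			if (!v->shadow_msr_spe)
				enable_spe(v);
		} else if (v->shadow_msr_spe) {
			disable_spe(v);
		}
	}

	int main(void)
	{
		struct vcpu v = { .guest_msr_spe = true };
		sync_spe(&v);            /* SPE fault or mtmsr: restore on demand */
		v.guest_msr_spe = false;
		sync_spe(&v);            /* guest dropped MSR[SPE]: save and disable */
		return 0;
	}

The real code additionally wraps the load/save in preempt_disable()/enable_kernel_spe(), forces a save from vcpu_put() on heavyweight exit, and handles SPEFSCR non-lazily in booke_interrupts.S.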
@@ -195,6 +195,12 @@ struct kvm_vcpu_arch {
 	u64 fpr[32];
 	u64 fpscr;
 
+#ifdef CONFIG_SPE
+	ulong evr[32];
+	ulong spefscr;
+	ulong host_spefscr;
+	u64 acc;
+#endif
 #ifdef CONFIG_ALTIVEC
 	vector128 vr[32];
 	vector128 vscr;
@@ -318,6 +318,7 @@
 #define ESR_ILK		0x00100000	/* Instr. Cache Locking */
 #define ESR_PUO		0x00040000	/* Unimplemented Operation exception */
 #define ESR_BO		0x00020000	/* Byte Ordering */
+#define ESR_SPV		0x00000080	/* Signal Processing operation */
 
 /* Bit definitions related to the DBCR0. */
 #if defined(CONFIG_40x)
@@ -497,6 +497,13 @@ int main(void)
 	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 						arch.timing_exit.tv32.tbu));
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,57 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_save_guest_spe(vcpu);
+	vcpu->arch.shadow_msr &= ~MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	enable_kernel_spe();
+	kvmppc_load_guest_spe(vcpu);
+	vcpu->arch.shadow_msr |= MSR_SPE;
+	preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.shared->msr & MSR_SPE) {
+		if (!(vcpu->arch.shadow_msr & MSR_SPE))
+			kvmppc_vcpu_enable_spe(vcpu);
+	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
+		kvmppc_vcpu_disable_spe(vcpu);
+	}
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
+		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+	vcpu->arch.shared->msr = new_msr;
+
+	if (vcpu->arch.shared->msr & MSR_WE) {
+		kvm_vcpu_block(vcpu);
+		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+	};
+
+	kvmppc_vcpu_sync_spe(vcpu);
+}
+
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
 {
@@ -344,10 +396,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
-	case BOOKE_INTERRUPT_SPE_UNAVAIL:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+		if (vcpu->arch.shared->msr & MSR_SPE)
+			kvmppc_vcpu_enable_spe(vcpu);
+		else
+			kvmppc_booke_queue_irqprio(vcpu,
+						   BOOKE_IRQPRIO_SPE_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
+	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA:
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +416,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
+#else
+	case BOOKE_INTERRUPT_SPE_UNAVAIL:
+		/*
+		 * Guest wants SPE, but host kernel doesn't support it. Send
+		 * an "unimplemented operation" program check to the guest.
+		 */
+		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+		r = RESUME_GUEST;
+		break;
+
+	/*
+	 * These really should never happen without CONFIG_SPE,
+	 * as we should never enable the real MSR[SPE] in the guest.
+	 */
+	case BOOKE_INTERRUPT_SPE_FP_DATA:
+	case BOOKE_INTERRUPT_SPE_FP_ROUND:
+		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+		       __func__, exit_nr, vcpu->arch.pc);
+		run->hw.hardware_exit_reason = exit_nr;
+		r = RESUME_HOST;
+		break;
+#endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
@@ -52,24 +52,18 @@
 
 extern unsigned long kvmppc_booke_handlers;
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-	vcpu->arch.shared->msr = new_msr;
-
-	if (vcpu->arch.shared->msr & MSR_WE) {
-		kvm_vcpu_block(vcpu);
-		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-	};
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
 
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_BOOKE_H__ */
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -239,6 +240,14 @@ _GLOBAL(kvmppc_resume_host)
 heavyweight_exit:
 	/* Not returning to guest. */
 
+#ifdef CONFIG_SPE
+	/* save guest SPEFSCR and load host SPEFSCR */
+	mfspr	r9, SPRN_SPEFSCR
+	stw	r9, VCPU_SPEFSCR(r4)
+	lwz	r9, VCPU_HOST_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r9
+#endif
+
 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
 	stw	r15, VCPU_GPR(r15)(r4)
@@ -340,6 +349,14 @@ _GLOBAL(__kvmppc_vcpu_run)
 	lwz	r30, VCPU_GPR(r30)(r4)
 	lwz	r31, VCPU_GPR(r31)(r4)
 
+#ifdef CONFIG_SPE
+	/* save host SPEFSCR and load guest SPEFSCR */
+	mfspr	r3, SPRN_SPEFSCR
+	stw	r3, VCPU_HOST_SPEFSCR(r4)
+	lwz	r3, VCPU_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r3
+#endif
+
 lightweight_exit:
 	stw	r2, HOST_R2(r1)
 
@@ -425,3 +442,24 @@ lightweight_exit:
 	lwz	r3, VCPU_GPR(r3)(r4)
 	lwz	r4, VCPU_GPR(r4)(r4)
 	rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+	evxor	evr6, evr6, evr6
+	evmwumiaa evr6, evr6, evr6
+	li	r4,VCPU_ACC
+	evstddx	evr6, r4, r3		/* save acc */
+	blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	li	r4,VCPU_ACC
+	evlddx	evr6,r4,r3
+	evmra	evr6,evr6		/* load acc */
+	REST_32EVRS(0, r4, r3, VCPU_EVR)
+	blr
+#endif
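A note on the accumulator handling in kvmppc_save_guest_spe/kvmppc_load_guest_spe above, as I read the SPE idiom (the patch itself only says "save acc"/"load acc"): the 64-bit accumulator has no direct move-from instruction, so it is extracted and reinstalled through multiply-accumulate side effects. Here r3 holds the vcpu pointer and r4 the VCPU_ACC offset generated by asm-offsets.c.

	evxor     evr6, evr6, evr6   /* evr6 = 0 */
	evmwumiaa evr6, evr6, evr6   /* evr6 = ACC + 0*0, i.e. copy ACC into evr6 */
	evstddx   evr6, r4, r3       /* store the 64-bit value at vcpu + VCPU_ACC */
	...
	evlddx    evr6, r4, r3       /* reload the saved value */
	evmra     evr6, evr6         /* write evr6 back into the accumulator */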
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+	if (vcpu->arch.shadow_msr & MSR_SPE)
+		kvmppc_vcpu_disable_spe(vcpu);
+#endif
 }
 
 int kvmppc_core_check_processor_compat(void)