KVM: arm/arm64: mask/unmask daif around VHE guests
Non-VHE systems take an exception to EL2 in order to world-switch into the guest. When returning from the guest, KVM implicitly restores the DAIF flags when it returns to the kernel at EL1.

With VHE none of this exception-level jumping happens, so KVM's world-switch code is exposed to the host kernel's DAIF values, and KVM spills the guest-exit DAIF values back into the host kernel. On entry to a guest we have Debug and SError exceptions unmasked; KVM has switched VBAR but isn't prepared to handle these. On guest exit, Debug exceptions are left disabled once we return to the host and will stay this way until we enter user space.

Add a helper to mask/unmask DAIF around VHE guests. The unmask can only happen after the host's VBAR value has been synchronised by the isb in __vhe_hyp_call (via kvm_call_hyp()). Masking could be done as late as setting KVM's VBAR value, but is kept here for symmetry.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
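For readers who don't have arch/arm64/include/asm/daifflags.h to hand, the sketch below illustrates the masking semantics the message relies on. It is a minimal, illustrative sketch only (the *_sketch names are invented here, not kernel symbols); the kernel's local_daif_mask() and local_daif_restore(DAIF_PROCCTX_NOIRQ) differ in detail, but the effect is roughly this: mask everything before running with KVM's VBAR_EL1 installed, then return to "process context, IRQs still masked" once the host's VBAR_EL1 has been resynchronised.

/* Illustrative only: simplified stand-ins for the <asm/daifflags.h> helpers. */

/* Mask Debug, SError, IRQ and FIQ (set all four DAIF bits) so no exception
 * can be delivered while KVM's vectors are installed at VBAR_EL1. */
static inline void daif_mask_all_sketch(void)
{
	asm volatile("msr daifset, #0xf" ::: "memory");
}

/* Back to process context with only IRQs masked: clear D, A and F
 * (0b1101 = 0xd) but leave the I bit set, matching DAIF_PROCCTX_NOIRQ. */
static inline void daif_restore_noirq_sketch(void)
{
	asm volatile("msr daifclr, #0xd" ::: "memory");
}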
parent 68ddbf09ec
commit 4f5abad9e8
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -301,4 +301,6 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 /* All host FP/SIMD state is restored on guest exit, so nothing to save: */
 static inline void kvm_fpsimd_flush_cpu_state(void) {}
 
+static inline void kvm_arm_vhe_guest_enter(void) {}
+static inline void kvm_arm_vhe_guest_exit(void) {}
 #endif /* __ARM_KVM_HOST_H__ */
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -398,4 +399,13 @@ static inline void kvm_fpsimd_flush_cpu_state(void)
 	sve_flush_cpu_state();
 }
 
+static inline void kvm_arm_vhe_guest_enter(void)
+{
+	local_daif_mask();
+}
+
+static inline void kvm_arm_vhe_guest_exit(void)
+{
+	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
 #endif /* __ARM64_KVM_HOST_H__ */
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -704,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 */
 		trace_kvm_entry(*vcpu_pc(vcpu));
 		guest_enter_irqoff();
+		if (has_vhe())
+			kvm_arm_vhe_guest_enter();
 
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
+		if (has_vhe())
+			kvm_arm_vhe_guest_exit();
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->stat.exits++;
 		/*