KVM: nVMX: Sync all PGDs on nested transition with shadow paging
[ Upstream commit 07ffaf343e34b555c9e7ea39a9c81c439a706f13 ]
Trigger a full TLB flush on behalf of the guest on nested VM-Enter and
VM-Exit when VPID is disabled for L2. kvm_mmu_new_pgd() syncs only the
current PGD, which can theoretically leave stale, unsync'd entries in a
previous guest PGD, which could be consumed if L2 is allowed to load CR3
with PCID_NOFLUSH=1.
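To see why PCID_NOFLUSH is the trigger, here is a loose paraphrase of the
guest CR3-load handling in kvm_set_cr3() (not verbatim kernel code): with
CR4.PCIDE set, CR3 bit 63 lets the guest switch PGDs without a TLB flush,
which is exactly how a previously used, never-synced PGD could be
re-activated with stale entries:

    /* Loose paraphrase of kvm_set_cr3(), not verbatim kernel code. */
    bool skip_tlb_flush = false;

    if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE) &&
        (cr3 & X86_CR3_PCID_NOFLUSH)) {
            /* Guest asked to keep TLB contents across the CR3 load. */
            skip_tlb_flush = true;
            cr3 &= ~X86_CR3_PCID_NOFLUSH;
    }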
Rename KVM_REQ_HV_TLB_FLUSH to KVM_REQ_TLB_FLUSH_GUEST so that it can
be utilized for its obvious purpose of emulating a guest TLB flush.
Note, there is no change to the actual TLB flush executed by KVM, even
though the fast PGD switch uses KVM_REQ_TLB_FLUSH_CURRENT. When VPID is
disabled for L2, vpid02 is guaranteed to be '0', and thus
nested_get_vpid02() will return the VPID that is shared by L1 and L2.
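For reference, the helper being described is tiny; paraphrased from
arch/x86/kvm/vmx/nested.h (the fallback to vmx->vpid is the "shared by
L1 and L2" case):

    static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);

            /* vpid02 == 0 when VPID is disabled for L2: use L1's VPID. */
            return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
    }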
Generate the request outside of kvm_mmu_new_pgd(), as getting the common
helper to correctly identify which request is needed is quite painful.
E.g. using KVM_REQ_TLB_FLUSH_GUEST when nested EPT is in play is wrong as
a TLB flush from the L1 kernel's perspective does not invalidate EPT
mappings. And, by using KVM_REQ_TLB_FLUSH_GUEST, nVMX can be simplified
in the future by moving the logic into nested_vmx_transition_tlb_flush().
Fixes: 41fab65e7c ("KVM: nVMX: Skip MMU sync on nested VMX transition when possible")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609234235.1244004-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
@@ -84,7 +84,7 @@
 #define KVM_REQ_APICV_UPDATE \
 	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
-#define KVM_REQ_HV_TLB_FLUSH \
+#define KVM_REQ_TLB_FLUSH_GUEST \
 	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
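For readers new to the request machinery: each KVM_REQ_* is a bit in a
per-vCPU bitmap, set via kvm_make_request() and consumed via
kvm_check_request(), which tests and clears the bit. A minimal, runnable
userspace model of that set/test-and-clear pattern (the names below are
illustrative only, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_TLB_FLUSH_GUEST 27

    static atomic_ulong requests;

    static void make_request(int req)
    {
            atomic_fetch_or(&requests, 1UL << req);
    }

    static bool check_request(int req)
    {
            /* Test-and-clear: a pending request fires exactly once. */
            return atomic_fetch_and(&requests, ~(1UL << req)) & (1UL << req);
    }

    int main(void)
    {
            make_request(REQ_TLB_FLUSH_GUEST);
            printf("%d\n", check_request(REQ_TLB_FLUSH_GUEST));  /* 1: pending */
            printf("%d\n", check_request(REQ_TLB_FLUSH_GUEST));  /* 0: cleared */
            return 0;
    }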
@@ -1564,7 +1564,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
 	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
-	kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
+	kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
 				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
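The masked variant above fans the request out to every vCPU set in
vcpu_mask; for a single vCPU the equivalent, sketched with the generic
helpers, would be:

    /* Sketch: request a guest TLB flush on one vCPU, then kick it so a
     * running vCPU notices the pending request. */
    kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
    kvm_vcpu_kick(vcpu);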
@@ -1142,12 +1142,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 
 	/*
 	 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
-	 * flushes are handled by nested_vmx_transition_tlb_flush(). See
-	 * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
+	 * flushes are handled by nested_vmx_transition_tlb_flush().
 	 */
-	if (!nested_ept)
-		kvm_mmu_new_pgd(vcpu, cr3, true,
-				!nested_vmx_transition_mmu_sync(vcpu));
+	if (!nested_ept) {
+		kvm_mmu_new_pgd(vcpu, cr3, true, true);
+
+		/*
+		 * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
+		 * across all PCIDs, i.e. all PGDs need to be synchronized.
+		 * See nested_vmx_transition_mmu_sync() for more details.
+		 */
+		if (nested_vmx_transition_mmu_sync(vcpu))
+			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+	}
 
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
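nested_vmx_transition_mmu_sync() itself is not part of this diff.
Reconstructed from the commit message alone (NOT copied from nested.c,
and deliberately renamed here to flag that), its predicate is roughly:

    /*
     * Hypothetical paraphrase: a nested transition architecturally acts
     * as a guest TLB flush, and so requires the shadow MMU to be
     * synchronized, when L2 runs without a VPID tag of its own.
     */
    static bool transition_flushes_guest_tlb(struct kvm_vcpu *vcpu)
    {
            return !enable_vpid || !nested_cpu_has_vpid(get_vmcs12(vcpu));
    }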
@@ -8852,7 +8852,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
 			kvm_vcpu_flush_tlb_current(vcpu);
-		if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+		if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
 			kvm_vcpu_flush_tlb_guest(vcpu);
 
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
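The two handlers differ in scope: the "current" flush is KVM housekeeping
that invalidates only the active root, while the "guest" flush emulates a
flush as the guest would perceive it, i.e. across all PCIDs. Paraphrased
from x86.c (callback member names per the v5.10-era struct kvm_x86_ops):

    static void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
    {
            ++vcpu->stat.tlb_flush;
            kvm_x86_ops.tlb_flush_current(vcpu);  /* active root only */
    }

    static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
    {
            ++vcpu->stat.tlb_flush;
            kvm_x86_ops.tlb_flush_guest(vcpu);    /* as seen by the guest */
    }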