forked from luck/tmp_suning_uos_patched
kvm: x86/mmu: Support changed pte notifier in tdp MMU
In order to interoperate correctly with the rest of KVM and other Linux subsystems, the TDP MMU must correctly handle various MMU notifiers. Add a hook and handle the change_pte MMU notifier. Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures. This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538 Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20201014182700.2888246-15-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
f8e144971c
commit
1d8dd6b3f1
|
@ -1509,7 +1509,14 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
|
|||
|
||||
/*
 * change_pte MMU notifier handler: update KVM's shadow mappings for @hva to
 * track the new host PTE @pte.
 *
 * Dispatches to the legacy/shadow MMU rmap walk and, when the TDP MMU is
 * enabled for this VM, to the TDP MMU handler as well.  The return values
 * are OR-ed together; non-zero indicates the caller must flush TLBs.
 */
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	int r;

	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);

	if (kvm->arch.tdp_mmu_enabled)
		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);

	return r;
}
|
||||
|
||||
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
|
||||
|
|
|
@ -671,3 +671,59 @@ int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
|
|||
return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
|
||||
test_age_gfn);
|
||||
}
|
||||
|
||||
/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 *
 * NOTE: any needed TLB flushes are issued directly below with
 * kvm_flush_remote_tlbs_with_address(); this function always returns 0,
 * so it never asks the caller to flush on its behalf.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long data)
{
	struct tdp_iter iter;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;
	u64 new_spte;
	int need_flush = 0;

	/* The walk below only handles 4K PTEs; a huge host PTE is unexpected. */
	WARN_ON(pte_huge(*ptep));

	new_pfn = pte_pfn(*ptep);

	/* Walk the single-GFN range [gfn, gfn + 1) under this root. */
	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
		/* Skip non-leaf levels; only last-level SPTEs map the page. */
		if (iter.level != PG_LEVEL_4K)
			continue;

		/* Nothing mapped at 4K level for this GFN; stop the walk. */
		if (!is_shadow_present_pte(iter.old_spte))
			break;

		/* Zap the stale mapping before installing a replacement. */
		tdp_mmu_set_spte(kvm, &iter, 0);

		kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

		/*
		 * Re-install a mapping for the new PFN only when the host PTE
		 * is read-only; a writable PTE is left unmapped and will be
		 * faulted back in on demand.
		 */
		if (!pte_write(*ptep)) {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					iter.old_spte, new_pfn);

			tdp_mmu_set_spte(kvm, &iter, new_spte);
		}

		need_flush = 1;
	}

	/* Flush once more for the zapped GFN if any SPTE was touched. */
	if (need_flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

	return 0;
}
|
||||
|
||||
/*
 * Handle the change_pte MMU notifier for a single HVA: apply set_tdp_spte()
 * over the one-page range [address, address + 1) on every TDP MMU root,
 * smuggling the new host PTE pointer through the opaque data argument.
 */
int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
		pte_t *host_ptep)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
					    (unsigned long)host_ptep,
					    set_tdp_spte);
}
|
||||
|
||||
|
|
|
@ -25,4 +25,7 @@ int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
|
|||
int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);

/* change_pte MMU notifier hook: update the mapping for @address to *host_ptep. */
int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep);
#endif /* __KVM_X86_MMU_TDP_MMU_H */
|
||||
|
|
Loading…
Reference in New Issue
Block a user