KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
The 'flags' field of 'struct mmu_notifier_range' is used to indicate
whether invalidate_range_{start,end}() are permitted to block. In the
case of kvm_mmu_notifier_invalidate_range_start(), this field is not
forwarded on to the architecture-specific implementation of
kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
whether or not to block.

Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
architectures are aware as to whether or not they are permitted to
block.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20200811102725.7121-2-will@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
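For illustration, a minimal sketch (not part of this patch) of what the
forwarded flags enable on the backend side. MMU_NOTIFIER_RANGE_BLOCKABLE
is the bit carried in 'struct mmu_notifier_range'::flags; the helper
__arch_unmap_range() is hypothetical:

  int kvm_unmap_hva_range(struct kvm *kvm,
  			unsigned long start, unsigned long end,
  			unsigned flags)
  {
  	/* The notifier tells us whether we may sleep while unmapping. */
  	bool blockable = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;

  	/*
  	 * A non-blockable backend must not reschedule mid-walk; a
  	 * blockable one may, e.g., cond_resched() between ranges.
  	 */
  	return __arch_unmap_range(kvm, start, end, blockable);
  }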
commit fdfe7cbd58
parent cb957adb4e
@@ -473,7 +473,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -2213,7 +2213,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
@@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
@@ -58,7 +58,8 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
 extern int kvm_unmap_hva_range(struct kvm *kvm,
-			       unsigned long start, unsigned long end);
+			       unsigned long start, unsigned long end,
+			       unsigned flags);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	/* kvm_unmap_hva flushes everything anyways */
 	kvm_unmap_hva(kvm, start);
@@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void);
 	_ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
@@ -482,7 +482,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+					     range->flags);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)