KVM: x86/mmu: Move slot_level_*() helper functions up a few lines

...so that kvm_mmu_invalidate_zap_pages_in_memslot() can utilize the helpers
in future patches.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 164bf7e56c
commit 85875a133e
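For readers following the series: below is a minimal sketch, not part of this commit, of how a later consumer such as the planned kvm_mmu_invalidate_zap_pages_in_memslot() might drive the moved helpers. The names zap_rmap_example() and zap_memslot_example() are hypothetical placeholders used only for illustration.

/*
 * Illustrative sketch only (not from this patch).  The handler matches the
 * slot_level_handler typedef introduced below.
 */
static bool zap_rmap_example(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
        /*
         * A real handler would zap the SPTEs chained off rmap_head and
         * return true when a remote TLB flush is needed.
         */
        return false;
}

static void zap_memslot_example(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        /* The slot_handle_*() helpers expect mmu_lock to be held. */
        spin_lock(&kvm->mmu_lock);

        /*
         * Visit every rmap at every page-table level in the slot; with
         * lock_flush_tlb == true the helper flushes TLBs and reschedules
         * under mmu_lock contention on the caller's behalf.
         */
        slot_handle_all_level(kvm, memslot, zap_rmap_example, true);

        spin_unlock(&kvm->mmu_lock);
}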
@@ -5487,6 +5487,76 @@ void kvm_disable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 
+
+/* The return value indicates if tlb flush on all vcpus is needed. */
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+
+/* The caller should hold mmu-lock before calling this function. */
+static __always_inline bool
+slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                        slot_level_handler fn, int start_level, int end_level,
+                        gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+{
+        struct slot_rmap_walk_iterator iterator;
+        bool flush = false;
+
+        for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+                        end_gfn, &iterator) {
+                if (iterator.rmap)
+                        flush |= fn(kvm, iterator.rmap);
+
+                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                        if (flush && lock_flush_tlb) {
+                                kvm_flush_remote_tlbs(kvm);
+                                flush = false;
+                        }
+                        cond_resched_lock(&kvm->mmu_lock);
+                }
+        }
+
+        if (flush && lock_flush_tlb) {
+                kvm_flush_remote_tlbs(kvm);
+                flush = false;
+        }
+
+        return flush;
+}
+
+static __always_inline bool
+slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                  slot_level_handler fn, int start_level, int end_level,
+                  bool lock_flush_tlb)
+{
+        return slot_handle_level_range(kvm, memslot, fn, start_level,
+                        end_level, memslot->base_gfn,
+                        memslot->base_gfn + memslot->npages - 1,
+                        lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                      slot_level_handler fn, bool lock_flush_tlb)
+{
+        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                        slot_level_handler fn, bool lock_flush_tlb)
+{
+        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
+                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+}
+
+static __always_inline bool
+slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                 slot_level_handler fn, bool lock_flush_tlb)
+{
+        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
+                                 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+}
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
         free_page((unsigned long)vcpu->arch.mmu->pae_root);
@@ -5561,75 +5631,6 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
         kvm_page_track_unregister_notifier(kvm, node);
 }
 
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
-
-/* The caller should hold mmu-lock before calling this function. */
-static __always_inline bool
-slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                        slot_level_handler fn, int start_level, int end_level,
-                        gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
-{
-        struct slot_rmap_walk_iterator iterator;
-        bool flush = false;
-
-        for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
-                        end_gfn, &iterator) {
-                if (iterator.rmap)
-                        flush |= fn(kvm, iterator.rmap);
-
-                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                        if (flush && lock_flush_tlb) {
-                                kvm_flush_remote_tlbs(kvm);
-                                flush = false;
-                        }
-                        cond_resched_lock(&kvm->mmu_lock);
-                }
-        }
-
-        if (flush && lock_flush_tlb) {
-                kvm_flush_remote_tlbs(kvm);
-                flush = false;
-        }
-
-        return flush;
-}
-
-static __always_inline bool
-slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                  slot_level_handler fn, int start_level, int end_level,
-                  bool lock_flush_tlb)
-{
-        return slot_handle_level_range(kvm, memslot, fn, start_level,
-                        end_level, memslot->base_gfn,
-                        memslot->base_gfn + memslot->npages - 1,
-                        lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                      slot_level_handler fn, bool lock_flush_tlb)
-{
-        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                        slot_level_handler fn, bool lock_flush_tlb)
-{
-        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
-                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                 slot_level_handler fn, bool lock_flush_tlb)
-{
-        return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
-}
-
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
         struct kvm_memslots *slots;