KVM: x86: fix APIC page invalidation
The implementation of the unpinned APIC page did not update the VMCS address cache when invalidation was done through range mmu notifiers. This became a problem when the page notifier was removed.

Re-introduce the arch-specific helper and call it from ...range_start.

Reported-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fixes: 38b9917350 ("kvm: vmx: Implement set_apic_access_page_addr")
Fixes: 369ea8242c ("mm/rmap: update to new mmu_notifier semantic v2")
Cc: <stable@vger.kernel.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Tested-by: Wanpeng Li <wanpeng.li@hotmail.com>
Tested-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent d29899a30f
commit b1394e745b
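For context on how the request raised by this patch is serviced (this is not part of the diff below): a rough sketch of the KVM_REQ_APIC_PAGE_RELOAD handling on the vCPU entry path in arch/x86/kvm/x86.c, paraphrased rather than copied from any particular kernel version; the surrounding request handling is elided and the function name below is made up for illustration.

/*
 * Sketch only, not part of this commit: how the vCPU entry path
 * consumes KVM_REQ_APIC_PAGE_RELOAD. Other requests and error
 * handling are omitted.
 */
static int vcpu_enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		/* ... other KVM_REQ_* handling elided ... */
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			/*
			 * Re-resolve the APIC access page and push its new
			 * physical address back into the VMCS address cache
			 * (kvm_x86_ops->set_apic_access_page_addr).
			 */
			kvm_vcpu_reload_apic_access_page(vcpu);
	}

	/* ... VM entry proper ... */
	return 0;
}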
arch/x86/include/asm/kvm_host.h
@@ -1448,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val) \
 	*(type *)((buf) + (offset) - 0x7e00) = val
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
arch/x86/kvm/x86.c
@@ -6764,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+	unsigned long apic_address;
+
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (start <= apic_address && apic_address < end)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;
virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 