x86/kvm: Introduce kvm_(un)map_gfn()
kvm_vcpu_(un)map operates on gfns from any current address space. In
certain cases we want to make sure we are not mapping SMRAM, and for
that we can use the kvm_(un)map_gfn() helpers introduced in this patch.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 1eff70a9ab (parent 8c6de56a42)
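For context, here is a minimal sketch (not part of this commit) of how a caller
might use the new pair. The function name touch_guest_page and the 64-byte
write are illustrative only, and the sketch assumes a kernel context where
linux/kvm_host.h is available:

	#include <linux/kvm_host.h>
	#include <linux/string.h>

	/*
	 * Hypothetical caller: touch a guest page without ever mapping
	 * SMRAM. kvm_map_gfn() resolves the gfn against the VM-wide
	 * memslots (address space 0), whereas kvm_vcpu_map() honors the
	 * vCPU's current address space and can therefore land in SMRAM
	 * while the vCPU is in SMM.
	 */
	static int touch_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		struct kvm_host_map map;

		if (kvm_map_gfn(vcpu, gfn, &map))
			return -EFAULT;	/* gfn not backed by a usable memslot */

		/* map.hva is the host-virtual alias of the guest page */
		memset(map.hva, 0, 64);

		/* dirty == true: mark the page dirty when releasing it */
		kvm_unmap_gfn(vcpu, &map, true);
		return 0;
	}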
include/linux/kvm_host.h
@@ -775,8 +775,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
virt/kvm/kvm_main.c
@@ -1821,12 +1821,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 			 struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 
 	if (!map)
 		return -EINVAL;
@@ -1855,14 +1856,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -1878,7 +1885,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 #endif
 
 	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+		mark_page_dirty_in_slot(memslot, map->gfn);
 		kvm_release_pfn_dirty(map->pfn);
 	} else {
 		kvm_release_pfn_clean(map->pfn);
@@ -1887,6 +1894,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
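The heart of the change is which memslots each wrapper consults. Below is a
sketch of the two lookup helpers, paraphrased from include/linux/kvm_host.h
of this era of the tree; the comments are added here for illustration and are
not in the kernel source:

	static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
	{
		/* address space 0: never SMRAM, hence kvm_map_gfn() is safe */
		return __kvm_memslots(kvm, 0);
	}

	static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
	{
		/* on x86 the as_id reflects whether the vCPU is in SMM,
		 * so kvm_vcpu_map() may map SMRAM for a vCPU in SMM */
		return __kvm_memslots(vcpu->kvm, kvm_arch_vcpu_memslots_id(vcpu));
	}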