forked from luck/tmp_suning_uos_patched
KVM: Clean up local variable usage in __kvm_set_memory_region()
Clean up __kvm_set_memory_region() to achieve several goals:

- Remove local variables that serve no real purpose
- Improve the readability of the code
- Better show the relationship between the 'old' and 'new' memslot
- Prepare for dynamically sizing memslots
- Document subtle gotchas (via comments)

Note, using 'tmp' to hold the initial memslot is not strictly necessary at this juncture, e.g. 'old' could be directly copied from id_to_memslot(), but keep the pointer usage as id_to_memslot() will be able to return a NULL pointer once memslots are dynamically sized.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
e96c81ee89
commit
163da372de
|
@@ -1071,13 +1071,11 @@ static int kvm_delete_memslot(struct kvm *kvm,
|
|||
int __kvm_set_memory_region(struct kvm *kvm,
|
||||
const struct kvm_userspace_memory_region *mem)
|
||||
{
|
||||
int r;
|
||||
gfn_t base_gfn;
|
||||
unsigned long npages;
|
||||
struct kvm_memory_slot *slot;
|
||||
struct kvm_memory_slot old, new;
|
||||
int as_id, id;
|
||||
struct kvm_memory_slot *tmp;
|
||||
enum kvm_mr_change change;
|
||||
int as_id, id;
|
||||
int r;
|
||||
|
||||
r = check_memory_region_flags(mem);
|
||||
if (r)
|
||||
|
@@ -1102,54 +1100,58 @@ int __kvm_set_memory_region(struct kvm *kvm,
|
|||
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
|
||||
return -EINVAL;
|
||||
|
||||
slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
|
||||
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
|
||||
npages = mem->memory_size >> PAGE_SHIFT;
|
||||
|
||||
if (npages > KVM_MEM_MAX_NR_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Make a full copy of the old memslot, the pointer will become stale
|
||||
* when the memslots are re-sorted by update_memslots(), and the old
|
||||
* memslot needs to be referenced after calling update_memslots(), e.g.
|
||||
* to free its resources and for arch specific behavior.
|
||||
* to free its resources and for arch specific behavior. Kill @tmp
|
||||
* after making a copy to deter potentially dangerous usage.
|
||||
*/
|
||||
old = *slot;
|
||||
tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
|
||||
old = *tmp;
|
||||
tmp = NULL;
|
||||
|
||||
if (!mem->memory_size)
|
||||
return kvm_delete_memslot(kvm, mem, &old, as_id);
|
||||
|
||||
new = old;
|
||||
|
||||
new.id = id;
|
||||
new.base_gfn = base_gfn;
|
||||
new.npages = npages;
|
||||
new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
|
||||
new.npages = mem->memory_size >> PAGE_SHIFT;
|
||||
new.flags = mem->flags;
|
||||
new.userspace_addr = mem->userspace_addr;
|
||||
|
||||
if (new.npages > KVM_MEM_MAX_NR_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
if (!old.npages) {
|
||||
change = KVM_MR_CREATE;
|
||||
new.dirty_bitmap = NULL;
|
||||
memset(&new.arch, 0, sizeof(new.arch));
|
||||
} else { /* Modify an existing slot. */
|
||||
if ((new.userspace_addr != old.userspace_addr) ||
|
||||
(npages != old.npages) ||
|
||||
(new.npages != old.npages) ||
|
||||
((new.flags ^ old.flags) & KVM_MEM_READONLY))
|
||||
return -EINVAL;
|
||||
|
||||
if (base_gfn != old.base_gfn)
|
||||
if (new.base_gfn != old.base_gfn)
|
||||
change = KVM_MR_MOVE;
|
||||
else if (new.flags != old.flags)
|
||||
change = KVM_MR_FLAGS_ONLY;
|
||||
else /* Nothing to change. */
|
||||
return 0;
|
||||
|
||||
/* Copy dirty_bitmap and arch from the current memslot. */
|
||||
new.dirty_bitmap = old.dirty_bitmap;
|
||||
memcpy(&new.arch, &old.arch, sizeof(new.arch));
|
||||
}
|
||||
|
||||
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
|
||||
/* Check for overlaps */
|
||||
kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
|
||||
if (slot->id == id)
|
||||
kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
|
||||
if (tmp->id == id)
|
||||
continue;
|
||||
if (!((base_gfn + npages <= slot->base_gfn) ||
|
||||
(base_gfn >= slot->base_gfn + slot->npages)))
|
||||
if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
|
||||
(new.base_gfn >= tmp->base_gfn + tmp->npages)))
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user