KVM: Validate userspace_addr of memslot when registered
This way, we can avoid checking the user space address many times when we read the guest memory. Although we could do the same for writes if we checked which slots are writable, we do not care about writes for now: reading guest memory happens more often than writing it.

[avi: change VERIFY_READ to VERIFY_WRITE]

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 12cb814f3b
commit fa3d315a4c
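For background: copy_from_user() performs an access_ok() range check on every call, while __copy_from_user() assumes the caller has already validated the range. Below is a minimal user-space sketch (not kernel code) of the pattern this commit relies on; range_ok(), set_memory_region() and read_guest() are simplified, hypothetical stand-ins for access_ok(), __kvm_set_memory_region() and the __copy_from_user() call sites.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct memslot {
	uintptr_t userspace_addr;
	size_t    memory_size;
};

/* Stand-in for access_ok(): validate the whole range once, at registration. */
static bool range_ok(uintptr_t addr, size_t size)
{
	return addr + size >= addr;          /* e.g. reject wrap-around */
}

static int set_memory_region(struct memslot *slot,
			     uintptr_t userspace_addr, size_t size)
{
	/* Reject inaccessible ranges up front, as the commit does with access_ok(). */
	if (!range_ok(userspace_addr, size))
		return -1;
	slot->userspace_addr = userspace_addr;
	slot->memory_size    = size;
	return 0;
}

/* Stand-in for __copy_from_user(): no per-call range check, just the copy. */
static void read_guest(const struct memslot *slot, size_t offset,
		       void *data, size_t len)
{
	memcpy(data, (const void *)(slot->userspace_addr + offset), len);
}

int main(void)
{
	static char backing[4096] = "guest memory";
	struct memslot slot;
	char buf[16] = "";

	if (set_memory_region(&slot, (uintptr_t)backing, sizeof(backing)))
		return 1;
	read_guest(&slot, 0, buf, sizeof(buf) - 1);
	printf("%s\n", buf);
	return 0;
}

The design point is simply that the whole memslot range is validated once at registration time, so every subsequent guest-memory read can skip the per-access check.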
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -185,7 +185,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (unlikely(copy_from_user(&pte, ptep_user, sizeof(pte)))) {
+		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
 			present = false;
 			break;
 		}
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -648,7 +648,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out;
 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 		goto out;
-	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
+	/* We can read the guest memory with __xxx_user() later on. */
+	if (user_alloc &&
+	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+	     !access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size)))
 		goto out;
 	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 		goto out;
@@ -1283,7 +1286,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	r = copy_from_user(data, (void __user *)addr + offset, len);
+	r = __copy_from_user(data, (void __user *)addr + offset, len);
 	if (r)
 		return -EFAULT;
 	return 0;