KVM: MMU: try to fix up page faults before giving up
The vGPU folks would like to trap the first access to a BAR by setting
vm_ops on the VMAs produced by mmap-ing a VFIO device.  The fault handler
then can use remap_pfn_range to place some non-reserved pages in the VMA.

This kind of VM_PFNMAP mapping is not handled by KVM, but follow_pfn
and fixup_user_fault together help support it.  The patch also supports
VM_MIXEDMAP vmas where the pfns are not reserved and thus subject to
reference counting.

Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Tested-by: Neo Jia <cjia@nvidia.com>
Reported-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 92176a8ede
commit add6a0cd1c

mm/gup.c | 1 +
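For context, the driver-side arrangement the commit message describes would look roughly like the minimal sketch below: mmap() on the device file installs only vm_ops, and the first access to the BAR takes a fault that backs the whole VMA with remap_pfn_range().  This is not code from the patch; the vgpu_bar_* names and the per-region structure are hypothetical, and the .fault signature assumed here is the older one that takes the vma explicitly.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical bookkeeping for one mmap-ed BAR region. */
struct vgpu_bar_region {
	unsigned long base_pfn;		/* first host pfn backing the BAR */
};

/*
 * The first touch of the BAR lands here because mmap() installed no PTEs.
 * Back the whole VMA in one go and let the faulting access retry.
 */
static int vgpu_bar_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct vgpu_bar_region *region = vma->vm_private_data;

	if (remap_pfn_range(vma, vma->vm_start, region->base_pfn,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;	/* PTEs are in place, no struct page to return */
}

static const struct vm_operations_struct vgpu_bar_vm_ops = {
	.fault = vgpu_bar_fault,
};

static int vgpu_bar_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_private_data = file->private_data;
	vma->vm_ops = &vgpu_bar_vm_ops;	/* defer the mapping to the first fault */
	return 0;
}

A VMA mapped this way carries VM_PFNMAP (remap_pfn_range sets it), so KVM cannot rely on get_user_pages; the hunks below make hva_to_pfn_remapped() fault the mapping in via fixup_user_fault() and then look the pfn up with follow_pfn().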
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -720,6 +720,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(fixup_user_fault);
 
 static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 						struct mm_struct *mm,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1446,9 +1446,45 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool *async,
 			       bool write_fault, kvm_pfn_t *p_pfn)
 {
-	*p_pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
-		vma->vm_pgoff;
-	BUG_ON(!kvm_is_reserved_pfn(*p_pfn));
+	unsigned long pfn;
+	int r;
+
+	r = follow_pfn(vma, addr, &pfn);
+	if (r) {
+		/*
+		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+		 * not call the fault handler, so do it here.
+		 */
+		bool unlocked = false;
+		r = fixup_user_fault(current, current->mm, addr,
+				     (write_fault ? FAULT_FLAG_WRITE : 0),
+				     &unlocked);
+		if (unlocked)
+			return -EAGAIN;
+		if (r)
+			return r;
+
+		r = follow_pfn(vma, addr, &pfn);
+		if (r)
+			return r;
+
+	}
+
+
+	/*
+	 * Get a reference here because callers of *hva_to_pfn* and
+	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
+	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
+	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
+	 * simply do nothing for reserved pfns.
+	 *
+	 * Whoever called remap_pfn_range is also going to call e.g.
+	 * unmap_mapping_range before the underlying pages are freed,
+	 * causing a call to our MMU notifier.
+	 */
+	kvm_get_pfn(pfn);
+
+	*p_pfn = pfn;
 	return 0;
 }
 
@@ -1493,12 +1529,15 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		goto exit;
 	}
 
+retry:
 	vma = find_vma_intersection(current->mm, addr, addr + 1);
 
 	if (vma == NULL)
 		pfn = KVM_PFN_ERR_FAULT;
 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
 		r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+		if (r == -EAGAIN)
+			goto retry;
 		if (r < 0)
 			pfn = KVM_PFN_ERR_FAULT;
 	} else {