vfio/type1: Use follow_pte()
[ Upstream commit 07956b6269d3ed05d854233d5bb776dca91751dd ]
follow_pfn() doesn't make sure that we're using the correct page
protections; get the pte with follow_pte() instead, so that we can
test protections and get the pfn from the pte.
Fixes: 5cbf3264bc ("vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent e47685ec4c
commit c2ff994883
drivers/vfio/vfio_iommu_type1.c

@@ -24,6 +24,7 @@
 #include <linux/compat.h>
 #include <linux/device.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -431,9 +432,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 			    unsigned long vaddr, unsigned long *pfn,
 			    bool write_fault)
 {
+	pte_t *ptep;
+	spinlock_t *ptl;
 	int ret;
 
-	ret = follow_pfn(vma, vaddr, pfn);
+	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
 	if (ret) {
 		bool unlocked = false;
 
@@ -447,9 +450,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 		if (ret)
 			return ret;
 
-		ret = follow_pfn(vma, vaddr, pfn);
+		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+		if (ret)
+			return ret;
 	}
 
+	if (write_fault && !pte_write(*ptep))
+		ret = -EFAULT;
+	else
+		*pfn = pte_pfn(*ptep);
+
+	pte_unmap_unlock(ptep, ptl);
 	return ret;
 }
 
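For readers piecing the change together from the hunks above, here is a minimal sketch of the follow_pte() usage pattern the commit message describes. It is not part of the patch: pfn_from_pte_checked() is a hypothetical helper, and it assumes the follow_pte(mm, addr, &ptep, &ptl) signature this kernel provides.

#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_unmap_unlock() */

/* Hypothetical helper, for illustration only. */
static int pfn_from_pte_checked(struct vm_area_struct *vma, unsigned long vaddr,
				bool need_write, unsigned long *pfn)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int ret;

	/* On success, returns with the pte mapped and its page-table lock held. */
	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
	if (ret)
		return ret;

	if (need_write && !pte_write(*ptep))
		ret = -EFAULT;		/* mapping exists but is not writable */
	else
		*pfn = pte_pfn(*ptep);	/* read the pfn while holding ptl */

	pte_unmap_unlock(ptep, ptl);
	return ret;
}

Unlike follow_pfn(), which only hands back a pfn, this keeps the page-table lock across the pte_write() check, so the protection test and the returned pfn describe the same pte.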