device-dax: Convert to vmf_insert_mixed and vm_fault_t
Use the new return type vm_fault_t for the fault and huge_fault handlers. For
now, this just documents that the function returns a VM_FAULT_* value
rather than an errno. Once all instances are converted, vm_fault_t will
become a distinct type.
See commit 1c8f422059 ("mm: change return type to vm_fault_t").
Previously, vm_insert_mixed() returned an error code that the driver mapped
to a VM_FAULT_* value. The new vmf_insert_mixed() removes this inefficiency
by returning a VM_FAULT_* value directly.
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
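
For illustration only (not part of the commit): a minimal sketch of the conversion pattern the message describes. The handler name and its simplified arguments below are hypothetical; vm_insert_mixed(), vmf_insert_mixed(), and the VM_FAULT_* codes are the real kernel interfaces being converted.

/*
 * Illustrative sketch; assumes the usual kernel headers (<linux/mm.h>,
 * <linux/pfn_t.h>) for the declarations used here.
 */

/* Before: vm_insert_mixed() returns an errno the driver must translate. */
static int example_fault_old(struct vm_fault *vmf, pfn_t pfn)
{
	int rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

/* After: vmf_insert_mixed() hands back a VM_FAULT_* value directly. */
static vm_fault_t example_fault_new(struct vm_fault *vmf, pfn_t pfn)
{
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}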
commit 226ab56107
parent 021c91791a
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -244,11 +244,11 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 	return -1;
 }
 
-static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
-	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
 	pfn_t pfn;
 	unsigned int fault_size = PAGE_SIZE;
@@ -274,17 +274,11 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
-
-	if (rc == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (rc < 0 && rc != -EBUSY)
-		return VM_FAULT_SIGBUS;
-
-	return VM_FAULT_NOPAGE;
+	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
 }
 
-static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	struct device *dev = &dev_dax->dev;
@@ -334,7 +328,8 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	unsigned long pud_addr = vmf->address & PUD_MASK;
 	struct device *dev = &dev_dax->dev;
@@ -384,13 +379,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	return VM_FAULT_FALLBACK;
 }
 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static int dev_dax_huge_fault(struct vm_fault *vmf,
+static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
 	int rc, id;
@@ -420,7 +416,7 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
 	return rc;
 }
 
-static int dev_dax_fault(struct vm_fault *vmf)
+static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
 {
 	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -3,6 +3,7 @@
 #define _LINUX_HUGE_MM_H
 
 #include <linux/sched/coredump.h>
+#include <linux/mm_types.h>
 
 #include <linux/fs.h> /* only for vma_is_dax() */
 
@@ -46,9 +47,9 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmd, pfn_t pfn, bool write);
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 			pud_t *pud, pfn_t pfn, bool write);
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -752,7 +752,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 	spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmd, pfn_t pfn, bool write)
 {
 	pgprot_t pgprot = vma->vm_page_prot;
@@ -812,7 +812,7 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 	spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 			pud_t *pud, pfn_t pfn, bool write)
 {
 	pgprot_t pgprot = vma->vm_page_prot;