mm: consolidate the get_user_pages* implementations
Always build mm/gup.c so that we don't have to provide separate nommu stubs.
Also merge the get_user_pages_fast and __get_user_pages_fast stubs for the
!HAVE_FAST_GUP case into the main implementations, which will never call the
fast path if HAVE_FAST_GUP is not set.

This also ensures the new put_user_pages* helpers are available for nommu,
where they are currently missing; that would become a problem as soon as we
actually grow users for them.

Link: http://lkml.kernel.org/r/20190625143715.1689-13-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 050a9adc64 (parent d3649f68b4)
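Before the diff, a minimal userspace sketch (not from the patch) of the pattern the commit message relies on: keep a single fast-path call site in the always-built code and guard it with a compile-time constant, so the compiler drops the call when the feature is off while everything still compiles. FAST_GUP_ENABLED stands in for IS_ENABLED(CONFIG_HAVE_FAST_GUP), and fast_path()/slow_path()/pin_pages() are hypothetical placeholders for gup_pgd_range() and the regular GUP slow path.

#include <stdio.h>

#ifndef FAST_GUP_ENABLED
#define FAST_GUP_ENABLED 0	/* build with -DFAST_GUP_ENABLED=1 to flip */
#endif

#if FAST_GUP_ENABLED
static int fast_path(int nr)
{
	printf("fast path pinned %d\n", nr);
	return nr;
}
#else
/* Empty stub, analogous to the !CONFIG_HAVE_FAST_GUP gup_pgd_range() stub. */
static int fast_path(int nr)
{
	(void)nr;
	return 0;
}
#endif

static int slow_path(int nr)
{
	printf("slow path pinned %d\n", nr);
	return nr;
}

static int pin_pages(int nr)
{
	int done = 0;

	/* Single call site: the branch folds away when the feature is off. */
	if (FAST_GUP_ENABLED)
		done = fast_path(nr);
	if (done < nr)
		done += slow_path(nr - done);
	return done;
}

int main(void)
{
	printf("total pinned: %d\n", pin_pages(4));
	return 0;
}

Building with -DFAST_GUP_ENABLED=1 exercises the fast path first; the default build never calls it, yet the stub keeps the call site valid, which is exactly why the weak fallbacks in mm/util.c become unnecessary.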
@@ -133,6 +133,7 @@ config HAVE_MEMBLOCK_PHYS_MAP
 	bool
 
 config HAVE_FAST_GUP
+	depends on MMU
 	bool
 
 config ARCH_KEEP_MEMBLOCK
@@ -22,7 +22,7 @@ KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
 mmu-y			:= nommu.o
-mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
+mmu-$(CONFIG_MMU)	:= highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
 			   msync.o page_vma_mapped.o pagewalk.o \
 			   pgtable-generic.o rmap.o vmalloc.o
@@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o $(mmu-y)
+			   debug.o gup.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o

mm/gup.c | 67

@@ -134,6 +134,7 @@ void put_user_pages(struct page **pages, unsigned long npages)
 }
 EXPORT_SYMBOL(put_user_pages);
 
+#ifdef CONFIG_MMU
 static struct page *no_page_table(struct vm_area_struct *vma,
 		unsigned int flags)
 {
@@ -1322,6 +1323,51 @@ struct page *get_dump_page(unsigned long addr)
 	return page;
 }
 #endif /* CONFIG_ELF_CORE */
+#else /* CONFIG_MMU */
+static long __get_user_pages_locked(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long start,
+		unsigned long nr_pages, struct page **pages,
+		struct vm_area_struct **vmas, int *locked,
+		unsigned int foll_flags)
+{
+	struct vm_area_struct *vma;
+	unsigned long vm_flags;
+	int i;
+
+	/* calculate required read or write permissions.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
+	 */
+	vm_flags  = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+	for (i = 0; i < nr_pages; i++) {
+		vma = find_vma(mm, start);
+		if (!vma)
+			goto finish_or_fault;
+
+		/* protect what we can, including chardevs */
+		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    !(vm_flags & vma->vm_flags))
+			goto finish_or_fault;
+
+		if (pages) {
+			pages[i] = virt_to_page(start);
+			if (pages[i])
+				get_page(pages[i]);
+		}
+		if (vmas)
+			vmas[i] = vma;
+		start = (start + PAGE_SIZE) & PAGE_MASK;
+	}
+
+	return i;
+
+finish_or_fault:
+	return i ? : -EFAULT;
+}
+#endif /* !CONFIG_MMU */
 
 #if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
 static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
@@ -1484,7 +1530,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 {
 	return nr_pages;
 }
-#endif
+#endif /* CONFIG_CMA */
 
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -2160,6 +2206,12 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
 			return;
 	} while (pgdp++, addr = next, addr != end);
 }
+#else
+static inline void gup_pgd_range(unsigned long addr, unsigned long end,
+		unsigned int flags, struct page **pages, int *nr)
+{
+}
+#endif /* CONFIG_HAVE_FAST_GUP */
 
 #ifndef gup_fast_permitted
 /*
@@ -2177,6 +2229,9 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end)
  * the regular GUP.
  * Note a difference with get_user_pages_fast: this always returns the
  * number of pages pinned, 0 if no pages were pinned.
+ *
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)
@@ -2206,7 +2261,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	 * block IPIs that come from THPs splitting.
 	 */
 
-	if (gup_fast_permitted(start, end)) {
+	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+	    gup_fast_permitted(start, end)) {
 		local_irq_save(flags);
 		gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
 		local_irq_restore(flags);
@@ -2214,6 +2270,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	return nr;
 }
+EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 
 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
 				   unsigned int gup_flags, struct page **pages)
@@ -2270,7 +2327,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 	if (unlikely(!access_ok((void __user *)start, len)))
 		return -EFAULT;
 
-	if (gup_fast_permitted(start, end)) {
+	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+	    gup_fast_permitted(start, end)) {
 		local_irq_disable();
 		gup_pgd_range(addr, end, gup_flags, pages, &nr);
 		local_irq_enable();
@@ -2296,5 +2354,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 
 	return ret;
 }
-
-#endif /* CONFIG_HAVE_GENERIC_GUP */
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
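Taken together, the mm/gup.c hunks above leave the file with roughly the layout sketched here. This is an abridged orientation sketch, not content of the patch; the comments paraphrase which existing blocks end up under which guard.

/* mm/gup.c after this patch, heavily abridged (sketch only) */

/* put_user_pages() and the other put_user_page* helpers: always built */

#ifdef CONFIG_MMU
/* follow_page(), __get_user_pages_locked() and the rest of the MMU GUP code */
#else /* CONFIG_MMU */
/* the simple nommu __get_user_pages_locked() added by this patch */
#endif /* !CONFIG_MMU */

#ifdef CONFIG_HAVE_FAST_GUP
/* gup_pgd_range() and the lockless page-table walk */
#else
static inline void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_FAST_GUP */

/*
 * __get_user_pages_fast() and get_user_pages_fast(): one copy each, which
 * only attempt gup_pgd_range() when IS_ENABLED(CONFIG_HAVE_FAST_GUP).
 */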

mm/nommu.c | 88

@@ -111,94 +111,6 @@ unsigned int kobjsize(const void *objp)
 	return PAGE_SIZE << compound_order(page);
 }
 
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			     unsigned long start, unsigned long nr_pages,
-			     unsigned int foll_flags, struct page **pages,
-			     struct vm_area_struct **vmas, int *nonblocking)
-{
-	struct vm_area_struct *vma;
-	unsigned long vm_flags;
-	int i;
-
-	/* calculate required read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags  = (foll_flags & FOLL_WRITE) ?
-			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (foll_flags & FOLL_FORCE) ?
-			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
-	for (i = 0; i < nr_pages; i++) {
-		vma = find_vma(mm, start);
-		if (!vma)
-			goto finish_or_fault;
-
-		/* protect what we can, including chardevs */
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
-
-		if (pages) {
-			pages[i] = virt_to_page(start);
-			if (pages[i])
-				get_page(pages[i]);
-		}
-		if (vmas)
-			vmas[i] = vma;
-		start = (start + PAGE_SIZE) & PAGE_MASK;
-	}
-
-	return i;
-
-finish_or_fault:
-	return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- *   slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    unsigned int gup_flags, struct page **pages,
-		    struct vm_area_struct **vmas)
-{
-	return __get_user_pages(current, current->mm, start, nr_pages,
-				gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			    unsigned int gup_flags, struct page **pages,
-			    int *locked)
-{
-	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
-			struct mm_struct *mm, unsigned long start,
-			unsigned long nr_pages, struct page **pages,
-			unsigned int gup_flags)
-{
-	long ret;
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
-				NULL, NULL);
-	up_read(&mm->mmap_sem);
-	return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     struct page **pages, unsigned int gup_flags)
-{
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping

mm/util.c | 47

@@ -300,53 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 }
 #endif
 
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- * If the architecture does not support this function, simply return with no
- * pages pinned.
- */
-int __weak __get_user_pages_fast(unsigned long start,
-				 int nr_pages, int write, struct page **pages)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @gup_flags:	flags modifying pin behaviour
- * @pages:	array that receives pointers to the pages pinned.
- *		Should be at least nr_pages long.
- *
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm, with force=0 and vma=NULL. However
- * unlike get_user_pages, it must be called without mmap_sem held.
- *
- * get_user_pages_fast may take mmap_sem and page table locks, so no
- * assumptions can be made about lack of locking. get_user_pages_fast is to be
- * implemented in a way that is advantageous (vs get_user_pages()) when the
- * user memory area is already faulted in and present in ptes. However if the
- * pages have to be faulted in, it may turn out to be slightly slower so
- * callers need to carefully consider what to use. On many architectures,
- * get_user_pages_fast simply falls back to get_user_pages.
- *
- * Return: number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int __weak get_user_pages_fast(unsigned long start,
-				int nr_pages, unsigned int gup_flags,
-				struct page **pages)
-{
-	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
-}
-EXPORT_SYMBOL_GPL(get_user_pages_fast);
-
 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long pgoff)
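For context, a hedged caller-side sketch of the interface this consolidation makes available on both MMU and nommu builds. pin_user_buffer() is a hypothetical helper written for illustration, not a function from this patch; get_user_pages_fast() and put_user_pages() are the real interfaces touched above.

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical illustration only; not part of commit 050a9adc64. */
static int pin_user_buffer(unsigned long uaddr, int nr_pages, bool write)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Uses the lockless fast path when the architecture selects
	 * HAVE_FAST_GUP, and falls back to the regular GUP slow path
	 * (or the nommu variant) otherwise.
	 */
	pinned = get_user_pages_fast(uaddr, nr_pages,
				     write ? FOLL_WRITE : 0, pages);
	if (pinned > 0) {
		/* ... access the pinned pages here ... */

		/* Release them with the helper that is now built for nommu too. */
		put_user_pages(pages, pinned);
	}

	kfree(pages);
	return pinned < 0 ? pinned : 0;
}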