VM: add "vm_mmap()" helper function

This continues the theme started with vm_brk() and vm_munmap():
vm_mmap() does the same thing as do_mmap(), but additionally does the
required VM locking.
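
To make the change concrete, here is a minimal caller-side sketch. The helper names and the file/size parameters are invented for illustration; only do_mmap(), vm_mmap() and the mmap_sem convention come from the kernel tree this commit applies to.

#include <linux/fs.h>		/* struct file */
#include <linux/mm.h>		/* do_mmap(), vm_mmap() */
#include <linux/mman.h>		/* PROT_* and MAP_* flags */
#include <linux/sched.h>	/* current */

/* Old convention: every caller takes mmap_sem around do_mmap(). */
static unsigned long map_shared_old(struct file *file, unsigned long size)
{
	unsigned long addr;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	up_write(&current->mm->mmap_sem);
	return addr;
}

/* New convention: vm_mmap() takes and releases mmap_sem internally. */
static unsigned long map_shared_new(struct file *file, unsigned long size)
{
	return vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
}

The second form is what every conversion in the hunks below reduces to.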

This uninlines do_mmap() (and rewrites it to be clearer), which sadly
means duplicating it in mm/mmap.c and mm/nommu.c.  But that way we don't have
to export our internal do_mmap_pgoff() function.
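
Spelled out with comments, the new out-of-line do_mmap() is just a thin byte-offset front end over the page-offset interface; this restates the function added in the mm/mmap.c hunk further down, with nothing new beyond the comments:

unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	/* reject a byte offset whose page-aligned end would wrap around */
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	/* the offset must be page aligned, since only the page number is passed on */
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;
	/* the internal interface works in pages, not bytes */
	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}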

Some day we hopefully don't have to export do_mmap() either, if all
modular users can become the simpler vm_mmap() instead.  We're actually
very close to that already, with the notable exception of the (broken)
use in i810, and a couple of stragglers in binfmt_elf.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Linus Torvalds
Date:   2012-04-20 17:13:58 -07:00
Commit: 6be5ceb02e (parent: a46ef99d80)

15 changed files with 87 additions and 97 deletions


@@ -346,12 +346,10 @@ void single_step_once(struct pt_regs *regs)
}
/* allocate a cache line of writable, executable memory */
down_write(&current->mm->mmap_sem);
buffer = (void __user *) do_mmap(NULL, 0, 64,
buffer = (void __user *) vm_mmap(NULL, 0, 64,
PROT_EXEC | PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
0);
up_write(&current->mm->mmap_sem);
if (IS_ERR((void __force *)buffer)) {
kfree(state);


@@ -379,26 +379,22 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto beyond_if;
}
down_write(&current->mm->mmap_sem);
error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
MAP_EXECUTABLE | MAP_32BIT,
fd_offset);
up_write(&current->mm->mmap_sem);
if (error != N_TXTADDR(ex)) {
send_sig(SIGKILL, current, 0);
return error;
}
down_write(&current->mm->mmap_sem);
error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
MAP_EXECUTABLE | MAP_32BIT,
fd_offset + ex.a_text);
up_write(&current->mm->mmap_sem);
if (error != N_DATADDR(ex)) {
send_sig(SIGKILL, current, 0);
return error;
@@ -482,12 +478,10 @@ static int load_aout_library(struct file *file)
goto out;
}
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
N_TXTOFF(ex));
up_write(&current->mm->mmap_sem);
retval = error;
if (error != start_addr)
goto out;


@@ -6336,13 +6336,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (npages && !old.rmap) {
unsigned long userspace_addr;
down_write(&current->mm->mmap_sem);
userspace_addr = do_mmap(NULL, 0,
userspace_addr = vm_mmap(NULL, 0,
npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
map_flags,
0);
up_write(&current->mm->mmap_sem);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);


@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
* Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
* about each buffer into user space. For PCI buffers, it calls do_mmap() with
* Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
* about each buffer into user space. For PCI buffers, it calls vm_mmap() with
* offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
* drm_mmap_dma().
*/
@@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
retcode = -EINVAL;
goto done;
}
down_write(&current->mm->mmap_sem);
virtual = do_mmap(file_priv->filp, 0, map->size,
virtual = vm_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
up_write(&current->mm->mmap_sem);
} else {
down_write(&current->mm->mmap_sem);
virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
up_write(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */


@@ -581,10 +581,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
obj->filp->f_op = &exynos_drm_gem_fops;
obj->filp->private_data = obj;
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
up_write(&current->mm->mmap_sem);
drm_gem_object_unreference_unlocked(obj);


@@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
return -EINVAL;
/* This is all entirely broken */
down_write(&current->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;


@@ -1087,11 +1087,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;


@@ -319,24 +319,20 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto beyond_if;
}
down_write(&current->mm->mmap_sem);
error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
fd_offset);
up_write(&current->mm->mmap_sem);
if (error != N_TXTADDR(ex)) {
send_sig(SIGKILL, current, 0);
return error;
}
down_write(&current->mm->mmap_sem);
error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
fd_offset + ex.a_text);
up_write(&current->mm->mmap_sem);
if (error != N_DATADDR(ex)) {
send_sig(SIGKILL, current, 0);
return error;
@@ -417,12 +413,10 @@ static int load_aout_library(struct file *file)
goto out;
}
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
N_TXTOFF(ex));
up_write(&current->mm->mmap_sem);
retval = error;
if (error != start_addr)
goto out;


@@ -958,10 +958,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
}
#ifdef ELF_PLAT_INIT
@@ -1046,8 +1044,7 @@ static int load_elf_library(struct file *file)
eppnt++;
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
error = do_mmap(file,
error = vm_mmap(file,
ELF_PAGESTART(eppnt->p_vaddr),
(eppnt->p_filesz +
ELF_PAGEOFFSET(eppnt->p_vaddr)),
@@ -1055,7 +1052,6 @@ static int load_elf_library(struct file *file)
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
(eppnt->p_offset -
ELF_PAGEOFFSET(eppnt->p_vaddr)));
up_write(&current->mm->mmap_sem);
if (error != ELF_PAGESTART(eppnt->p_vaddr))
goto out_free_ph;


@@ -390,21 +390,17 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
(executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
stack_prot |= PROT_EXEC;
down_write(&current->mm->mmap_sem);
current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_UNINITIALIZED | MAP_GROWSDOWN,
0);
if (IS_ERR_VALUE(current->mm->start_brk)) {
up_write(&current->mm->mmap_sem);
retval = current->mm->start_brk;
current->mm->start_brk = 0;
goto error_kill;
}
up_write(&current->mm->mmap_sem);
current->mm->brk = current->mm->start_brk;
current->mm->context.end_brk = current->mm->start_brk;
current->mm->context.end_brk +=
@@ -955,10 +951,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
mflags |= MAP_EXECUTABLE;
down_write(&mm->mmap_sem);
maddr = do_mmap(NULL, load_addr, top - base,
maddr = vm_mmap(NULL, load_addr, top - base,
PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
up_write(&mm->mmap_sem);
if (IS_ERR_VALUE(maddr))
return (int) maddr;
@@ -1096,10 +1090,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* create the mapping */
disp = phdr->p_vaddr & ~PAGE_MASK;
down_write(&mm->mmap_sem);
maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
phdr->p_offset - disp);
up_write(&mm->mmap_sem);
kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
loop, phdr->p_memsz + disp, prot, flags,
@@ -1143,10 +1135,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
unsigned long xmaddr;
flags |= MAP_FIXED | MAP_ANONYMOUS;
down_write(&mm->mmap_sem);
xmaddr = do_mmap(NULL, xaddr, excess - excess1,
xmaddr = vm_mmap(NULL, xaddr, excess - excess1,
prot, flags, 0);
up_write(&mm->mmap_sem);
kdebug("mmap[%d] <anon>"
" ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",


@@ -542,10 +542,8 @@ static int load_flat_file(struct linux_binprm * bprm,
*/
DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
down_write(&current->mm->mmap_sem);
textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
MAP_PRIVATE|MAP_EXECUTABLE, 0);
up_write(&current->mm->mmap_sem);
if (!textpos || IS_ERR_VALUE(textpos)) {
if (!textpos)
textpos = (unsigned long) -ENOMEM;
@@ -556,10 +554,8 @@ static int load_flat_file(struct linux_binprm * bprm,
len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
down_write(&current->mm->mmap_sem);
realdatastart = do_mmap(0, 0, len,
realdatastart = vm_mmap(0, 0, len,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
if (!realdatastart)
@@ -603,10 +599,8 @@ static int load_flat_file(struct linux_binprm * bprm,
len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
down_write(&current->mm->mmap_sem);
textpos = do_mmap(0, 0, len,
textpos = vm_mmap(0, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
if (!textpos || IS_ERR_VALUE(textpos)) {
if (!textpos)


@@ -147,10 +147,8 @@ static int map_som_binary(struct file *file,
code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize);
current->mm->start_code = code_start;
current->mm->end_code = code_start + code_size;
down_write(&current->mm->mmap_sem);
retval = do_mmap(file, code_start, code_size, prot,
retval = vm_mmap(file, code_start, code_size, prot,
flags, SOM_PAGESTART(hpuxhdr->exec_tfile));
up_write(&current->mm->mmap_sem);
if (retval < 0 && retval > -1024)
goto out;
@@ -158,20 +156,16 @@ static int map_som_binary(struct file *file,
data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize);
current->mm->start_data = data_start;
current->mm->end_data = bss_start = data_start + data_size;
down_write(&current->mm->mmap_sem);
retval = do_mmap(file, data_start, data_size,
retval = vm_mmap(file, data_start, data_size,
prot | PROT_WRITE, flags,
SOM_PAGESTART(hpuxhdr->exec_dfile));
up_write(&current->mm->mmap_sem);
if (retval < 0 && retval > -1024)
goto out;
som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize);
current->mm->start_brk = current->mm->brk = som_brk;
down_write(&current->mm->mmap_sem);
retval = do_mmap(NULL, bss_start, som_brk - bss_start,
retval = vm_mmap(NULL, bss_start, som_brk - bss_start,
prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
if (retval > 0 || retval < -1024)
retval = 0;
out:


@@ -1393,31 +1393,20 @@ extern int install_special_mapping(struct mm_struct *mm,
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff);
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
unsigned long ret = -EINVAL;
if ((offset + PAGE_ALIGN(len)) < offset)
goto out;
if (!(offset & ~PAGE_MASK))
ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
return ret;
}
extern unsigned long do_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);


@@ -953,7 +953,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
* The caller must hold down_write(&current->mm->mmap_sem).
*/
unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff)
{
@@ -1089,7 +1089,32 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
if (unlikely(offset + PAGE_ALIGN(len) < offset))
return -EINVAL;
if (unlikely(offset & ~PAGE_MASK))
return -EINVAL;
return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(do_mmap);
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
unsigned long ret;
struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
ret = do_mmap(file, addr, len, prot, flag, offset);
up_write(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL(vm_mmap);
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,


@@ -1233,7 +1233,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
/*
* handle mapping creation for uClinux
*/
unsigned long do_mmap_pgoff(struct file *file,
static unsigned long do_mmap_pgoff(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long prot,
@@ -1470,7 +1470,32 @@ unsigned long do_mmap_pgoff(struct file *file,
show_free_areas(0);
return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
if (unlikely(offset + PAGE_ALIGN(len) < offset))
return -EINVAL;
if (unlikely(offset & ~PAGE_MASK))
return -EINVAL;
return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(do_mmap);
unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
unsigned long ret;
struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
ret = do_mmap(file, addr, len, prot, flag, offset);
up_write(&mm->mmap_sem);
return ret;
}
EXPORT_SYMBOL(vm_mmap);
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,