mm: oom: let oom_reap_task and exit_mmap run concurrently
This is purely required because exit_aio() may block, so exit_mmap() may
never start; in that case the memory can only be freed if oom_reap_task()
is able to run on an mm whose mm_users count has already dropped to 0.
At the same time, if the OOM reaper didn't wait at all for the memory of
the current OOM candidate to be freed by exit_mmap->unmap_vmas, it would
generate a spurious OOM kill.
If it weren't for exit_aio or similar blocking functions in the last
mmput, it would be enough to change oom_reap_task() so that, when it
finds mm_users == 0, it waits for a timeout or for __mmput to set
MMF_OOM_SKIP itself. But exit_mmap is not the only problem here, so
letting exit_mmap and oom_reap_task run concurrently is warranted.
The resulting runtime is non-standard: exit_mmap() runs without
mmap_sem, while oom_reap_task() takes mmap_sem for reading as usual
(much like MADV_DONTNEED).
The race between the two is solved with a combination of
tsk_is_oom_victim() (serialized by task_lock) and MMF_OOM_SKIP
(serialized by a dummy down_write/up_write cycle, along the same lines
as the ksm_exit method).
If oom_reap_task() may still be running when exit_mmap starts, exit_mmap
will wait for it to finish in down_write() (before taking down the mm
structures that would otherwise make oom_reap_task() fail with a
use-after-free). If exit_mmap comes first, oom_reap_task() will skip the
mm because MMF_OOM_SKIP is already set; by then all memory has already
been freed and the mm data structures may already have been taken down
by free_pgtables.
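
Condensed from the diff below, the synchronization boils down to the
following pattern (an illustrative sketch only; the surrounding code of
both functions is omitted):

    /* exit_mmap() side: runs without mmap_sem */
    set_bit(MMF_OOM_SKIP, &mm->flags);
    if (unlikely(tsk_is_oom_victim(current))) {
            /* wait for a reaper that already holds mmap_sem for reading */
            down_write(&mm->mmap_sem);
            up_write(&mm->mmap_sem);
    }
    free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);

    /* __oom_reap_task_mm() side: holds mmap_sem for reading */
    if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
            /* exit_mmap has freed (or is freeing) everything: back off */
            up_read(&mm->mmap_sem);
            goto unlock_oom;
    }

Either the reaper sees MMF_OOM_SKIP and backs off, or exit_mmap blocks in
down_write() until the reaper drops mmap_sem, so the reaper never touches
page tables that free_pgtables is about to tear down.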
[aarcange@redhat.com: incremental one liner]
Link: http://lkml.kernel.org/r/20170726164319.GC29716@redhat.com
[rientjes@google.com: remove unused mmput_async]
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1708141733130.50317@chino.kir.corp.google.com
[aarcange@redhat.com: microoptimization]
Link: http://lkml.kernel.org/r/20170817171240.GB5066@redhat.com
Link: http://lkml.kernel.org/r/20170726162912.GA29716@redhat.com
Fixes: 26db62f179 ("oom: keep mm of the killed task available")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a2468cc9bf
commit 2129258024
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
@@ -84,12 +84,6 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
-/* same as above but performs the slow path from the async context. Can
- * be called from the atomic context as well
- */
-extern void mmput_async(struct mm_struct *);
-#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/kernel/fork.c b/kernel/fork.c
@@ -922,7 +922,6 @@ static inline void __mmput(struct mm_struct *mm)
 	}
 	if (mm->binfmt)
 		module_put(mm->binfmt->module);
-	set_bit(MMF_OOM_SKIP, &mm->flags);
 	mmdrop(mm);
 }
 
@@ -938,22 +937,6 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
-#ifdef CONFIG_MMU
-static void mmput_async_fn(struct work_struct *work)
-{
-	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
-	__mmput(mm);
-}
-
-void mmput_async(struct mm_struct *mm)
-{
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		INIT_WORK(&mm->async_put_work, mmput_async_fn);
-		schedule_work(&mm->async_put_work);
-	}
-}
-#endif
-
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
diff --git a/mm/mmap.c b/mm/mmap.c
@@ -44,6 +44,7 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
+#include <linux/oom.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -3001,6 +3002,23 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
+	set_bit(MMF_OOM_SKIP, &mm->flags);
+	if (unlikely(tsk_is_oom_victim(current))) {
+		/*
+		 * Wait for oom_reap_task() to stop working on this
+		 * mm. Because MMF_OOM_SKIP is already set before
+		 * calling down_read(), oom_reap_task() will not run
+		 * on this "mm" post up_write().
+		 *
+		 * tsk_is_oom_victim() cannot be set from under us
+		 * either because current->mm is already set to NULL
+		 * under task_lock before calling mmput and oom_mm is
+		 * set not NULL by the OOM killer only if current->mm
+		 * is found not NULL while holding the task_lock.
+		 */
+		down_write(&mm->mmap_sem);
+		up_write(&mm->mmap_sem);
+	}
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
@@ -495,11 +495,12 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	}
 
 	/*
-	 * increase mm_users only after we know we will reap something so
-	 * that the mmput_async is called only when we have reaped something
-	 * and delayed __mmput doesn't matter that much
+	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
+	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
+	 * under mmap_sem for reading because it serializes against the
+	 * down_write();up_write() cycle in exit_mmap().
 	 */
-	if (!mmget_not_zero(mm)) {
+	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 		up_read(&mm->mmap_sem);
 		trace_skip_task_reaping(tsk->pid);
 		goto unlock_oom;
@@ -542,12 +543,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 		       K(get_mm_counter(mm, MM_SHMEMPAGES)));
 	up_read(&mm->mmap_sem);
 
-	/*
-	 * Drop our reference but make sure the mmput slow path is called from a
-	 * different context because we shouldn't risk we get stuck there and
-	 * put the oom_reaper out of the way.
-	 */
-	mmput_async(mm);
 	trace_finish_task_reaping(tsk->pid);
 unlock_oom:
 	mutex_unlock(&oom_lock);