mm: use VM_BUG_ON_MM where possible
Dump the contents of the relevant struct mm_struct when we hit the bug condition.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 31c9afa6db
commit 96dad67ff2
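For reference, here is a minimal sketch of what the VM_BUG_ON_MM() helper used below is expected to expand to when CONFIG_DEBUG_VM is enabled. It is modeled on the include/linux/mmdebug.h definitions of this kernel generation rather than copied from them, and the dump_mm()-style helper name is an assumption:

#ifdef CONFIG_DEBUG_VM
/* Sketch: when the condition fires, dump the mm_struct before BUG(). */
#define VM_BUG_ON_MM(cond, mm)						\
	do {								\
		if (unlikely(cond)) {					\
			dump_mm(mm);	/* assumed helper that prints *mm */	\
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_MM(cond, mm)	VM_BUG_ON(cond)
#endif

With CONFIG_DEBUG_VM disabled, the new macro is intended to fall back to the plain VM_BUG_ON() it replaces, so the conversion below should not affect production builds.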
@@ -601,9 +601,8 @@ static void check_mm(struct mm_struct *mm)
 			printk(KERN_ALERT "BUG: Bad rss-counter state "
 					  "mm:%p idx:%d val:%ld\n", mm, i, x);
 	}
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-	VM_BUG_ON(mm->pmd_huge_pte);
+	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
 #endif
 }
 
@@ -1634,7 +1634,7 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
 	struct inode *inode;
 	int err;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
 	exe = fdget(fd);
 	if (!exe.file)
@@ -2048,7 +2048,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 		return -ENOMEM;
 
 	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON(khugepaged_test_exit(mm));
+	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 		free_mm_slot(mm_slot);
 		return 0;
@@ -235,7 +235,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
 	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
 	/*
@@ -410,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
 		struct vm_area_struct *vma;
 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-		BUG_ON(vma != ignore &&
-		       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+		VM_BUG_ON_VMA(vma != ignore &&
+			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+			vma);
 	}
 }
 
@@ -448,7 +449,7 @@ static void validate_mm(struct mm_struct *mm)
 		pr_emerg("map_count %d rb %d\n", mm->map_count, i);
 		bug = 1;
 	}
-	BUG_ON(bug);
+	VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
@@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
 	if (!walk->mm)
 		return -EINVAL;
 
-	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
 
 	pgd = pgd_offset(walk->mm, addr);
 	do {