e9b61f1985
This patch adds an implementation of split_huge_page() for the new
refcounting. Unlike the previous implementation, the new
split_huge_page() can fail if somebody holds a GUP pin on the page. It
also means that a pin on a page will prevent it from being split under
you. This makes the situation in many places much cleaner.

The basic scheme of split_huge_page():

- Check that page_count() equals the sum of the mapcounts of all
  subpages plus one (the caller's pin); fail with -EBUSY otherwise.
  This way we can avoid useless PMD splits.

- Freeze the page counters by splitting all PMDs and setting up
  migration PTEs.

- Re-check the sum of mapcounts against page_count(). The page's
  counts are stable now; return -EBUSY if the page is pinned.

- Split the compound page.

- Unfreeze the page by removing the migration entries.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
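The pin-detection step is easy to model in isolation. Below is a
minimal userspace C sketch of the check described above; struct
toy_page and every other name in it are invented for illustration and
are not kernel types. The invariant it tests is the one from the
scheme: with the page frozen, page_count() must equal the sum of the
subpage mapcounts plus the caller's single pin, and any unexplained
reference is treated as a GUP pin.

#include <stdio.h>
#include <errno.h>

#define NR_SUBPAGES 512	/* e.g. one 2 MiB THP on x86-64 */

/* Toy stand-ins for struct page state; all names are invented here. */
struct toy_page {
	int count;			/* page_count(): every reference */
	int mapcount[NR_SUBPAGES];	/* per-subpage mapcounts */
};

static int total_mapcount(const struct toy_page *p)
{
	int sum = 0;

	for (int i = 0; i < NR_SUBPAGES; i++)
		sum += p->mapcount[i];
	return sum;
}

/*
 * The freeze-time check: once migration entries replace the mappings,
 * the counters can no longer move, so every reference must be explained
 * by a mapping plus the single caller pin; anything extra is a GUP pin
 * and the split must fail.
 */
static int try_split(const struct toy_page *p)
{
	if (total_mapcount(p) != p->count - 1)
		return -EBUSY;
	/* ...split the compound page, then unfreeze... */
	return 0;
}

int main(void)
{
	struct toy_page p = { .count = 1 };	/* the caller's own pin */

	for (int i = 0; i < NR_SUBPAGES; i++) {
		p.mapcount[i] = 1;	/* one mapping per subpage... */
		p.count++;		/* ...each holding a reference */
	}
	printf("unpinned: %d\n", try_split(&p));	/* prints 0 */

	p.count++;	/* simulate an extra GUP pin */
	printf("pinned: %d\n", try_split(&p));	/* prints -16 (EBUSY) */
	return 0;
}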
207 lines · 6.2 KiB · C
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			  struct vm_area_struct *new_vma,
			  unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			unsigned long pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     spinlock_t **ptl);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
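/*
 * Illustrative note (assuming x86-64 with 4 KiB pages; not part of the
 * original header): PAGE_SHIFT = 12 and PMD_SHIFT = 21, so
 * HPAGE_PMD_ORDER = 9, HPAGE_PMD_NR = 512 subpages, and HPAGE_PMD_SIZE
 * below works out to 2 MiB. The two macros above expand in terms of
 * HPAGE_PMD_SHIFT, which is only usable under
 * CONFIG_TRANSPARENT_HUGEPAGE; the !THP stub definition hits
 * BUILD_BUG().
 */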

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd))				\
			__split_huge_pmd(__vma, __pmd, __address);	\
	} while (0)
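/*
 * Hypothetical caller sketch (comment only, not in the original file):
 * split_huge_pmd() above splits just this mm's PMD mapping and cannot
 * fail, while split_huge_page() splits the compound page itself and,
 * per this patch, can return -EBUSY. The caller's own reference is the
 * one pin the mapcount check tolerates:
 *
 *	get_page(page);
 *	lock_page(page);
 *	if (split_huge_page(page))
 *		goto out;	(failed: a GUP pin kept the page huge)
 *	...
 * out:
 *	unlock_page(page);
 *	put_page(page);
 */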

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				  spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				       spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return false;
}
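/*
 * Usage sketch (assumed caller pattern, comment only): on success the
 * PMD spinlock is taken and *ptl points at it, so the caller must
 * unlock when done:
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *		(operate on the stable huge pmd)
 *		spin_unlock(ptl);
 *	}
 */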
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				       spinlock_t **ptl)
{
	return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */