Merge branch 'akpm' (Fixes from Andrew)
Merge misc fixes from Andrew Morton:
 "Seven fixes, some of them fingers-crossed :("

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (7 patches)
  drivers/rtc/rtc-tps65910.c: fix invalid pointer access on _remove()
  mm: soft offline: split thp at the beginning of soft_offline_page()
  mm: avoid waking kswapd for THP allocations when compaction is deferred or contended
  revert "Revert "mm: remove __GFP_NO_KSWAPD""
  mm: vmscan: fix endless loop in kswapd balancing
  mm/vmemmap: fix wrong use of virt_to_page
  mm: compaction: fix return value of capture_free_page()
commit 50a53bbe12
drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
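For context, the kerneldoc above describes a best-effort allocator: it asks for *size bytes but may hand back a smaller, writesize-aligned buffer and updates *size accordingly. A minimal, hypothetical caller sketch follows (example_write_all() is illustrative, not from this commit; it assumes <linux/mtd/mtd.h> and <linux/slab.h>):

/* Hypothetical caller: write `len` bytes even if mtd_kmalloc_up_to()
 * shrank the buffer below `len`. */
static int example_write_all(struct mtd_info *mtd, loff_t to,
			     size_t len, const u_char *buf)
{
	size_t chunk = len;	/* ask for everything... */
	void *kbuf = mtd_kmalloc_up_to(mtd, &chunk); /* ...take what we get */
	int err = 0;

	if (!kbuf)
		return -ENOMEM;

	while (len && !err) {
		size_t n = min(len, chunk);
		size_t retlen;

		memcpy(kbuf, buf, n);
		err = mtd_write(mtd, to, n, &retlen, kbuf);
		to += retlen;
		buf += retlen;
		len -= retlen;
	}

	kfree(kbuf);
	return err;
}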
drivers/rtc/rtc-tps65910.c
@@ -288,11 +288,11 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
 static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
 {
 	/* leave rtc running, but disable irqs */
-	struct rtc_device *rtc = platform_get_drvdata(pdev);
+	struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
 
-	tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+	tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
 
-	rtc_device_unregister(rtc);
+	rtc_device_unregister(tps_rtc->rtc);
 	return 0;
 }
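The invalid pointer access fixed here came from _remove() reading the drvdata back as a `struct rtc_device *` when probe() had stored the driver's wrapper struct. A sketch of the relationship the fix relies on (the `rtc` member is taken from the diff above; the rest of the struct and example_probe() are assumed for illustration):

struct tps65910_rtc {
	struct rtc_device *rtc;
	/* ... presumably irq bookkeeping etc. ... */
};

static int example_probe(struct platform_device *pdev)
{
	struct tps65910_rtc *tps_rtc;

	tps_rtc = devm_kzalloc(&pdev->dev, sizeof(*tps_rtc), GFP_KERNEL);
	if (!tps_rtc)
		return -ENOMEM;

	/* probe() stores the wrapper, not the rtc_device itself... */
	platform_set_drvdata(pdev, tps_rtc);
	return 0;
}

/* ...so remove() must read back the same type and reach the rtc_device
 * through tps_rtc->rtc, as the hunk above now does. */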
include/linux/gfp.h
@@ -30,10 +30,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x200000u
-#define ___GFP_NO_KSWAPD	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
+#define ___GFP_NOTRACK		0x100000u
+#define ___GFP_OTHER_NODE	0x200000u
+#define ___GFP_WRITE		0x400000u
 
 /*
  * GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 
@@ -96,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-			 __GFP_NO_KSWAPD)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
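With __GFP_NO_KSWAPD gone, the remaining flags are renumbered to close the hole and the bit budget shrinks from 25 to 23. A standalone sanity check of that arithmetic (userspace C; the EX_ values are copied from the hunks above, not kernel identifiers):

#include <assert.h>

#define EX___GFP_WRITE		0x400000u	/* highest flag after renumbering */
#define EX___GFP_BITS_SHIFT	23
#define EX___GFP_BITS_MASK	((1u << EX___GFP_BITS_SHIFT) - 1)

int main(void)
{
	/* the topmost flag, bit 22, still fits inside the 23-bit mask */
	assert((EX___GFP_WRITE & EX___GFP_BITS_MASK) == EX___GFP_WRITE);
	/* 0x400000u is exactly bit 22, one below the new shift */
	assert(EX___GFP_WRITE == 1u << (EX___GFP_BITS_SHIFT - 1));
	return 0;
}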
include/trace/events/gfpflags.h
@@ -36,7 +36,6 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
-	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"
mm/memory-failure.c
@@ -1476,9 +1476,17 @@ int soft_offline_page(struct page *page, int flags)
 {
 	int ret;
 	unsigned long pfn = page_to_pfn(page);
 	struct page *hpage = compound_trans_head(page);
 
 	if (PageHuge(page))
 		return soft_offline_huge_page(page, flags);
+	if (PageTransHuge(hpage)) {
+		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+			pr_info("soft offline: %#lx: failed to split THP\n",
+				pfn);
+			return -EBUSY;
+		}
+	}
 
 	ret = get_any_page(page, pfn, flags);
 	if (ret < 0)
mm/page_alloc.c
@@ -1422,7 +1422,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << order;
+	return 1UL << alloc_order;
 }
 
 /*
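capture_free_page() splits the free page it captured down to alloc_order before handing it to the caller, returning the remainder to the free lists, so reporting `1UL << order` (the order of the page it found) over-counted whenever the two differ. A toy illustration of the arithmetic (the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int order = 3;		/* hypothetical: order of the free page found */
	unsigned int alloc_order = 0;	/* hypothetical: order the caller asked for */

	/* the rest of the split page went back to the free lists, so... */
	printf("old return: %lu pages\n", 1UL << order);	/* 8 -- over-reports */
	printf("new return: %lu pages\n", 1UL << alloc_order);	/* 1 -- what was taken */
	return 0;
}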
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
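The mask test reads: the caller wants a movable pageblock-order page and did not set __GFP_REPEAT. A THP fault matches this profile, while a hugetlbfs allocation of the same order, which does pass __GFP_REPEAT, does not. A standalone sketch of just that bit logic (the EX_ flag values are illustrative; the real ones live in include/linux/gfp.h):

#include <stdbool.h>
#include <stdio.h>

#define EX__GFP_MOVABLE		0x08u	/* illustrative values only */
#define EX__GFP_REPEAT		0x400u
#define EX_PAGEBLOCK_ORDER	9	/* e.g. 2 MB pageblocks on x86 */

static bool ex_is_thp_alloc(unsigned int gfp_mask, unsigned int order)
{
	/* movable requested, and the caller won't retry hard */
	return order == EX_PAGEBLOCK_ORDER &&
	       (gfp_mask & (EX__GFP_MOVABLE | EX__GFP_REPEAT)) == EX__GFP_MOVABLE;
}

int main(void)
{
	printf("THP-like alloc:       %d\n",
	       ex_is_thp_alloc(EX__GFP_MOVABLE, 9));			/* 1 */
	printf("hugetlbfs-like alloc: %d\n",
	       ex_is_thp_alloc(EX__GFP_MOVABLE | EX__GFP_REPEAT, 9));	/* 0 */
	return 0;
}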
@@ -2416,9 +2425,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
 		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
+				zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2488,15 +2498,21 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+				zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
mm/sparse.c
@@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
 	return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
@@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 			get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
 	unsigned long magic;
+	struct page *page = virt_to_page(memmap);
 
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;
@@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	 */
 
 	if (memmap) {
-		struct page *memmap_page;
-		memmap_page = virt_to_page(memmap);
-
 		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
 			>> PAGE_SHIFT;
 
-		free_map_bootmem(memmap_page, nr_pages);
+		free_map_bootmem(memmap, nr_pages);
 	}
 }
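The wrong use of virt_to_page was that the shared caller, free_section_usemap(), converted the memmap address unconditionally, even though the conversion is only meaningful on the non-vmemmap path. The fix passes the memmap pointer through and converts inside the variant that knows its mapping (the vmemmap variant is an empty stub). A pattern sketch of that idea, with entirely illustrative names:

/* Pattern sketch: keep address-space conversions in the variant that
 * knows the mapping, not in shared caller code. */
struct page;
struct page *linear_virt_to_page(void *addr);	/* valid for linear map only */

#ifdef EX_VMEMMAP
/* vmemmap case: `memmap` is not a linear-map address; never convert it */
static void ex_free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	/* XXX: not implemented yet, like the stub in the hunk above */
}
#else
static void ex_free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	/* here `memmap` does sit in the linear map, so this is safe */
	struct page *page = linear_virt_to_page(memmap);
	/* ... release the bootmem pages page .. page + nr_pages - 1 ... */
}
#endif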
mm/vmscan.c
@@ -2414,6 +2414,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
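The endless loop came from kswapd applying different "balanced" criteria in different places: some checks considered compaction_suitable(), others only the watermark, so a zone could look balanced to one check and unbalanced to another, leaving kswapd neither sleeping nor making progress. The hunks below route every call site through the single zone_balanced() predicate above. A toy model of the disagreement (entirely illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool watermark_ok = true;	/* zone passes the watermark test... */
static bool compaction_ok = false;	/* ...but not the compaction test */

/* pre-patch: two call sites, two different predicates */
static bool old_check_a(void) { return watermark_ok; }
static bool old_check_b(void) { return watermark_ok && compaction_ok; }

/* post-patch: every call site uses the same zone_balanced()-style test */
static bool new_check(void)   { return watermark_ok && compaction_ok; }

int main(void)
{
	/* 1 vs 0: one side says "balanced", the other says "keep going" */
	printf("old: %d vs %d -> kswapd can spin\n", old_check_a(), old_check_b());
	printf("new: %d vs %d -> both sides agree\n", new_check(), new_check());
	return 0;
}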
@@ -2492,8 +2505,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-							i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2602,8 +2614,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2679,9 +2690,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					   balance_gap, end_zone)) {
 				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2708,8 +2718,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark. This