per-zone and reclaim enhancements for memory controller: calculate active/inactive imbalance per cgroup
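
The imbalance is computed from the cgroup's per-zone statistics as the integer ratio active / (inactive + 1). As a worked example (illustrative numbers only): a cgroup holding 3000 active and 999 inactive pages reports an imbalance of 3000 / 1000 = 3, while any cgroup whose inactive list is at least as long as its active list reports 0.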
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Menage <menage@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 58ae83db2a
commit 5932f3671b
include/linux/memcontrol.h
@@ -68,6 +68,8 @@ extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
  * For memory reclaim.
  */
 extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
+
 
 
 #else /* CONFIG_CGROUP_MEM_CONT */
@@ -145,6 +147,12 @@ static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 {
 	return 0;
 }
+
+static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	return 0;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
mm/memcontrol.c
@@ -436,6 +436,20 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	return (int)((rss * 100L) / total);
 }
+/*
+ * This function is called from vmscan.c. In the page reclaim loop, the
+ * balance between the active and inactive lists is calculated. For memory
+ * controller page reclaim we should use the mem_cgroup's imbalance rather
+ * than the zone's global LRU imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	unsigned long active, inactive;
+	/* active and inactive are numbers of pages; 'long' is wide enough. */
+	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	return (long) (active / (inactive + 1));
+}
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
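
Below is a minimal, illustrative sketch (not part of this patch) of how a reclaim path could consume the new helper. The wrapper function and the threshold of 2 are hypothetical; the actual vmscan.c hook-up is presumably done elsewhere in this series.

/*
 * Hypothetical caller, for illustration only. It uses the real
 * mem_cgroup_reclaim_imbalance() helper added by this patch; the
 * function name and threshold below are made up.
 */
static int mem_cgroup_active_list_is_heavy(struct mem_cgroup *mem)
{
	/* imbalance == active / (inactive + 1); large means active-heavy */
	long imbalance = mem_cgroup_reclaim_imbalance(mem);

	/*
	 * Prefer deactivating active pages once the active list is
	 * roughly twice as long as the inactive list. The factor of 2
	 * is an arbitrary illustration, not a value from this patch.
	 */
	return imbalance >= 2;
}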