forked from luck/tmp_suning_uos_patched
mm: memcontrol: do not acquire page_cgroup lock for kmem pages
Kmem page charging and uncharging is serialized by means of exclusive
access to the page.  Do not take the page_cgroup lock and don't set
pc->flags atomically.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
9a2385eef9
commit
a840cda63e
|
@@ -3407,12 +3407,13 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 		return;
 	}
-
+	/*
+	 * The page is freshly allocated and not visible to any
+	 * outside callers yet. Set up pc non-atomically.
+	 */
 	pc = lookup_page_cgroup(page);
-	lock_page_cgroup(pc);
 	pc->mem_cgroup = memcg;
-	SetPageCgroupUsed(pc);
-	unlock_page_cgroup(pc);
+	pc->flags = PCG_USED;
 }
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -3422,19 +3423,11 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Fast unlocked return. Theoretically might have changed, have to
-	 * check again after locking.
-	 */
 	if (!PageCgroupUsed(pc))
 		return;
-
-	lock_page_cgroup(pc);
-	if (PageCgroupUsed(pc)) {
-		memcg = pc->mem_cgroup;
-		ClearPageCgroupUsed(pc);
-	}
-	unlock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	pc->flags = 0;
 
 	/*
 	 * We trust that only if there is a memcg associated with the page, it
|
Loading…
Reference in New Issue
Block a user