slab, slub: remove size disparity on debug kernel

I have noticed that on a debug kernel with SLAB, the size of some
non-root slabs was larger than that of their corresponding root slabs.

e.g. for radix_tree_node:
  $cat /proc/slabinfo | grep radix
  name     <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> ...
  radix_tree_node 15052    15075      4096         1             1 ...

  $cat /cgroup/memory/temp/memory.kmem.slabinfo | grep radix
  name     <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> ...
  radix_tree_node 1581      158       4120         1             2 ...

However, for SLUB on a debug kernel the sizes were the same.  On
further inspection it was found that SLUB always uses
kmem_cache.object_size to calculate kmem_cache.size, while SLAB uses
the given kmem_cache.size.  On a debug kernel a slab's size can be
larger than its object_size.  Thus, when creating a non-root slab, SLAB
uses the root's size as the base to calculate the non-root slab's size,
so the non-root slab's size can end up larger than the root slab's
size.  For SLUB, the non-root slab's size is calculated from the root's
object_size and therefore stays the same for root and non-root slabs.
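
The compounding can be illustrated with a small user-space sketch
(illustrative only, not kernel code; debug_pad() and the 24-byte
overhead below are made-up stand-ins for the per-object debug metadata
a debug kernel adds):

  /* Illustrative sketch only -- debug_pad() is a made-up stand-in for
   * the extra per-object debug metadata, not a kernel function. */
  #include <stdio.h>

  static unsigned int debug_pad(unsigned int size)
  {
          return size + 24;       /* pretend debug overhead is 24 bytes */
  }

  int main(void)
  {
          unsigned int object_size = 4096;

          /* Root cache: both allocators start from object_size. */
          unsigned int root_size = debug_pad(object_size);

          /* Non-root (memcg) cache, old behaviour:
           *   SLAB: based on the root's already-padded size -> padded twice
           *   SLUB: re-derived from the root's object_size  -> same as root
           */
          unsigned int memcg_slab = debug_pad(root_size);
          unsigned int memcg_slub = debug_pad(object_size);

          printf("root=%u memcg(SLAB)=%u memcg(SLUB)=%u\n",
                 root_size, memcg_slab, memcg_slub);
          return 0;
  }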

This patch makes a slab's object_size the default base for calculating
the slab's size.

Link: http://lkml.kernel.org/r/20180313165428.58699-1-shakeelb@google.com
Fixes: 794b1248be ("memcg, slab: separate memcg vs root cache creation paths")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Shakeel Butt  2018-04-05 16:21:50 -07:00
Committed by: Linus Torvalds
parent 302d55d51d
commit 613a5eb567

diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -379,7 +379,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 }
 
 static struct kmem_cache *create_cache(const char *name,
-                unsigned int object_size, unsigned int size, unsigned int align,
+                unsigned int object_size, unsigned int align,
                 slab_flags_t flags, unsigned int useroffset,
                 unsigned int usersize, void (*ctor)(void *),
                 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
@@ -396,8 +396,7 @@ static struct kmem_cache *create_cache(const char *name,
                 goto out;
 
         s->name = name;
-        s->object_size = object_size;
-        s->size = size;
+        s->size = s->object_size = object_size;
         s->align = align;
         s->ctor = ctor;
         s->useroffset = useroffset;
@@ -503,7 +502,7 @@ kmem_cache_create_usercopy(const char *name,
                 goto out_unlock;
         }
 
-        s = create_cache(cache_name, size, size,
+        s = create_cache(cache_name, size,
                          calculate_alignment(flags, align, size),
                          flags, useroffset, usersize, ctor, NULL, NULL);
         if (IS_ERR(s)) {
@@ -650,7 +649,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                 goto out_unlock;
 
         s = create_cache(cache_name, root_cache->object_size,
-                         root_cache->size, root_cache->align,
+                         root_cache->align,
                          root_cache->flags & CACHE_CREATE_MASK,
                          root_cache->useroffset, root_cache->usersize,
                          root_cache->ctor, memcg, root_cache);