forked from luck/tmp_suning_uos_patched
mm/slab: remove the checks for slab implementation bug
Some of the "#if DEBUG" checks are for reporting slab implementation bugs rather than user use-case bugs. They are not really needed because slab has been stable for quite a long time, and they make the code too dirty. This patch removes them. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
6fb924304a
commit
260b61dd46
29
mm/slab.c
29
mm/slab.c
|
@ -2110,8 +2110,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
|
|||
if (!(flags & SLAB_DESTROY_BY_RCU))
|
||||
flags |= SLAB_POISON;
|
||||
#endif
|
||||
if (flags & SLAB_DESTROY_BY_RCU)
|
||||
BUG_ON(flags & SLAB_POISON);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -2368,9 +2366,6 @@ static int drain_freelist(struct kmem_cache *cache,
|
|||
}
|
||||
|
||||
page = list_entry(p, struct page, lru);
|
||||
#if DEBUG
|
||||
BUG_ON(page->active);
|
||||
#endif
|
||||
list_del(&page->lru);
|
||||
/*
|
||||
* Safe to drop the lock. The slab is no longer linked
|
||||
|
@ -2528,30 +2523,23 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
|
|||
}
|
||||
}
|
||||
|
||||
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
|
||||
int nodeid)
|
||||
static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
|
||||
{
|
||||
void *objp;
|
||||
|
||||
objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
|
||||
page->active++;
|
||||
#if DEBUG
|
||||
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
|
||||
#endif
|
||||
|
||||
return objp;
|
||||
}
|
||||
|
||||
static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
|
||||
void *objp, int nodeid)
|
||||
static void slab_put_obj(struct kmem_cache *cachep,
|
||||
struct page *page, void *objp)
|
||||
{
|
||||
unsigned int objnr = obj_to_index(cachep, page, objp);
|
||||
#if DEBUG
|
||||
unsigned int i;
|
||||
|
||||
/* Verify that the slab belongs to the intended node */
|
||||
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
|
||||
|
||||
/* Verify double free bug */
|
||||
for (i = page->active; i < cachep->num; i++) {
|
||||
if (get_free_obj(page, i) == objnr) {
|
||||
|
@ -2817,8 +2805,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
|
|||
STATS_INC_ACTIVE(cachep);
|
||||
STATS_SET_HIGH(cachep);
|
||||
|
||||
ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
|
||||
node));
|
||||
ac_put_obj(cachep, ac, slab_get_obj(cachep, page));
|
||||
}
|
||||
|
||||
/* move slabp to correct slabp list: */
|
||||
|
@ -3101,7 +3088,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
|
|||
|
||||
BUG_ON(page->active == cachep->num);
|
||||
|
||||
obj = slab_get_obj(cachep, page, nodeid);
|
||||
obj = slab_get_obj(cachep, page);
|
||||
n->free_objects--;
|
||||
/* move slabp to correct slabp list: */
|
||||
list_del(&page->lru);
|
||||
|
@ -3252,7 +3239,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
|
|||
page = virt_to_head_page(objp);
|
||||
list_del(&page->lru);
|
||||
check_spinlock_acquired_node(cachep, node);
|
||||
slab_put_obj(cachep, page, objp, node);
|
||||
slab_put_obj(cachep, page, objp);
|
||||
STATS_DEC_ACTIVE(cachep);
|
||||
n->free_objects++;
|
||||
|
||||
|
@ -3282,9 +3269,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
|
|||
LIST_HEAD(list);
|
||||
|
||||
batchcount = ac->batchcount;
|
||||
#if DEBUG
|
||||
BUG_ON(!batchcount || batchcount > ac->avail);
|
||||
#endif
|
||||
|
||||
check_irq_off();
|
||||
n = get_node(cachep, node);
|
||||
spin_lock(&n->list_lock);
|
||||
|
|
Loading…
Reference in New Issue
Block a user