4b356be019

If ARCH_KMALLOC_MINALIGN is set to a value greater than 8 (SLUB's smallest
kmalloc cache), then SLUB may again generate duplicate slabs in sysfs,
because the object size is padded to reach ARCH_KMALLOC_MINALIGN and the
small slabs all end up with the same size. No arch sets ARCH_KMALLOC_MINALIGN
larger than 8, except mips, which for some reason wants a 128 byte alignment.

This patch increases the size of the smallest cache if ARCH_KMALLOC_MINALIGN
is greater than 8. In that case more and more of the smallest caches are
disabled.

If we do that, then the count of active general caches displayed on boot is
no longer correct, since we may skip elements of the kmalloc array. So count
them separately.

This approach was tested by Havard.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
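
To make the effect concrete, here is a minimal userspace sketch (separate from the header below; the ARCH_KMALLOC_MINALIGN value of 128 mimics the mips setting, and ilog2_int() is a hypothetical stand-in for the kernel's ilog2()) that prints which of the smallest general caches would be skipped:

#include <stdio.h>

/* Hypothetical stand-in for an arch that demands large alignment (mips-like). */
#define ARCH_KMALLOC_MINALIGN 128

/* Same derivation the header below uses. */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

/* Userspace stand-in for the kernel's ilog2(). */
static int ilog2_int(unsigned long n)
{
	int i = -1;

	while (n) {
		n >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	int shift_low = ilog2_int(KMALLOC_MIN_SIZE);	/* 7 for 128 byte alignment */
	int shift;

	/* Only the general caches from 8 bytes to 4 KiB are shown here. */
	for (shift = 3; shift <= 12; shift++)
		printf("kmalloc-%d: %s\n", 1 << shift,
		       shift < shift_low ? "skipped (below KMALLOC_MIN_SIZE)" : "created");
	return 0;
}

With the value above, this reports kmalloc-8 through kmalloc-64 as skipped, which is why those caches have to be left out of the boot-time count.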
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
	struct list_head full;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int order;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	struct kobject kobj;	/* For sysfs */

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
	struct page *cpu_slab[NR_CPUS];
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
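
/*
 * Illustrative index layout, assuming KMALLOC_SHIFT_LOW == 3:
 * kmalloc_caches[3] serves 8 byte objects, kmalloc_caches[7] 128 byte
 * objects, and so on up to KMALLOC_SHIFT_HIGH; slots 1 and 2 are reserved
 * for the odd 96 and 192 byte caches. With a larger ARCH_KMALLOC_MINALIGN
 * the slots below KMALLOC_SHIFT_LOW are simply never used.
 */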

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size > KMALLOC_MAX_SIZE)
		return -1;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
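
/*
 * A few illustrative results, assuming KMALLOC_MIN_SIZE == 8:
 *	kmalloc_index(8)   returns 3	-> 8 byte cache
 *	kmalloc_index(24)  returns 5	-> 32 byte cache
 *	kmalloc_index(100) returns 7	-> 128 byte cache
 *	kmalloc_index(192) returns 2	-> special 192 byte cache
 */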

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	/*
	 * This function only gets expanded if __builtin_constant_p(size), so
	 * testing it here shouldn't be needed. But some versions of gcc need
	 * help.
	 */
	if (__builtin_constant_p(size) && index < 0) {
		/*
		 * Generate a link failure. Would be great if we could
		 * do something to stop the compile here.
		 */
		extern void __kmalloc_size_too_large(void);
		__kmalloc_size_too_large();
	}
	return &kmalloc_caches[index];
}
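
/*
 * Example (illustrative): for a constant size such as kmalloc_slab(100),
 * kmalloc_index() folds to 7 at compile time and the whole call reduces
 * to the constant address &kmalloc_caches[7].
 */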

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA 0
#endif

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc(s, flags);
	} else
		return __kmalloc(size, flags);
}
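
/*
 * Example (illustrative): kmalloc(64, GFP_KERNEL) with a constant size
 * compiles down to kmem_cache_alloc(&kmalloc_caches[6], GFP_KERNEL),
 * while a variable size or a GFP_DMA allocation falls back to __kmalloc().
 */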

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_zalloc(s, flags);
	} else
		return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	} else
		return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */