dma kmalloc handling fixes
1. We need kmalloc_percpu for all of the now extended kmalloc caches array,
   not just for each shift value.

2. init_kmem_cache_nodes() must assume node 0 locality for statically
   allocated dma kmem_cache structures even after boot is complete.

Reported-and-tested-by: Alex Chiang <achiang@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
commit 91efd773c7
parent 7738dd9e8f
@@ -2062,7 +2062,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
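The first hunk only widens an array bound, but the reason is the index math behind it. Below is a minimal userspace sketch of that scheme, assuming (as the surrounding SLUB code of this period suggests) that a boot-time kmalloc cache takes its static per-CPU slot by its position in kmalloc_caches[], i.e. s - kmalloc_caches, and that the DMA caches occupy slots above SLUB_PAGE_SHIFT. The sizes, helper name and main() driver are illustrative, not the kernel's code:

/*
 * Userspace sketch (not kernel code): each boot-time kmalloc cache picks
 * its static per-CPU slot by its index in the cache array. If the per-CPU
 * array were sized by SLUB_PAGE_SHIFT while DMA caches sit at higher
 * indices, the computed index would run past the end of kmalloc_percpu[].
 */
#include <assert.h>
#include <stdio.h>

#define SLUB_PAGE_SHIFT 14                      /* illustrative value */
#define KMALLOC_CACHES  (2 * SLUB_PAGE_SHIFT)   /* extra slots for DMA caches */

struct kmem_cache     { const char *name; };
struct kmem_cache_cpu { void *freelist; };

static struct kmem_cache     kmalloc_caches[KMALLOC_CACHES];
static struct kmem_cache_cpu kmalloc_percpu[KMALLOC_CACHES]; /* was SLUB_PAGE_SHIFT */

/* Boot-time analogue of handing a static cache its per-CPU structure. */
static struct kmem_cache_cpu *static_cpu_slab(struct kmem_cache *s)
{
	long idx = s - kmalloc_caches;  /* slot = index in the cache array */

	assert(idx >= 0 && idx < KMALLOC_CACHES);
	return &kmalloc_percpu[idx];
}

int main(void)
{
	/* A DMA cache above SLUB_PAGE_SHIFT still gets a valid slot. */
	struct kmem_cache *dma = &kmalloc_caches[SLUB_PAGE_SHIFT + 1];

	printf("per-cpu slot index: %ld\n",
	       (long)(static_cpu_slab(dma) - kmalloc_percpu));
	return 0;
}

Sizing both arrays with KMALLOC_CACHES keeps the cache array and the static per-CPU array in step, which is what the one-line change above restores.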
@@ -2148,7 +2148,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int node;
 	int local_node;
 
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
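The second hunk decides whether a cache is one of the statically allocated kmalloc caches with a plain address-range test on the struct kmem_cache pointer: only a dynamically allocated cache gets its home node from the page backing the structure, while anything inside kmalloc_caches[] (including DMA caches created after boot) is pinned to node 0. A small userspace sketch of that membership test, mirroring the patch's comparison; the helper names, array size and stubbed node lookup are mine, not the kernel's:

/*
 * Userspace sketch of the address-range test from the patch: a cache is
 * treated as a static kmalloc cache (and pinned to node 0) when its
 * struct kmem_cache lies inside kmalloc_caches[]. page_to_nid()/
 * virt_to_page() are stubbed out.
 */
#include <stdio.h>

#define KMALLOC_CACHES 28	/* illustrative */

struct kmem_cache { const char *name; };

static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/* Stand-in for page_to_nid(virt_to_page(s)) on a dynamically allocated cache. */
static int node_of_backing_page(struct kmem_cache *s)
{
	(void)s;
	return 1;	/* pretend the backing page lives on node 1 */
}

static int local_node_for(struct kmem_cache *s, int slab_state_up)
{
	if (slab_state_up && (s < kmalloc_caches ||
			s > kmalloc_caches + KMALLOC_CACHES))
		return node_of_backing_page(s);	/* dynamically allocated cache */
	return 0;				/* static kmalloc cache: assume node 0 */
}

int main(void)
{
	struct kmem_cache dynamic_cache = { "some_cache" };

	printf("static dma cache -> node %d\n",
	       local_node_for(&kmalloc_caches[20], 1));
	printf("dynamic cache    -> node %d\n",
	       local_node_for(&dynamic_cache, 1));
	return 0;
}

As in the kernel, the comparison assumes a flat address space: a pointer outside the array simply compares as out of range, so only caches embedded in the static array take the node 0 path.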