forked from luck/tmp_suning_uos_patched
22fc6eccbf
____cacheline_maxaligned_in_smp is currently used to align critical structures and avoid false sharing. It uses the per-arch L1_CACHE_SHIFT_MAX, and people find L1_CACHE_SHIFT_MAX useless. However, we have been using ____cacheline_maxaligned_in_smp to align structures on the internode cacheline size. As per Andi's suggestion, the following patch kills ____cacheline_maxaligned_in_smp and introduces INTERNODE_CACHE_SHIFT, which defaults to L1_CACHE_SHIFT for all arches. Arches needing L3/internode cacheline alignment can define INTERNODE_CACHE_SHIFT in the arch asm/cache.h. The patch replaces ____cacheline_maxaligned_in_smp with ____cacheline_internodealigned_in_smp. With this patch, L1_CACHE_SHIFT_MAX can be killed. Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org> Signed-off-by: Shai Fultheim <shai@scalex86.org> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
67 lines
1.6 KiB
C
67 lines
1.6 KiB
C
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <linux/kernel.h>
#include <linux/config.h>
#include <asm/cache.h>

/* Round x up to the next multiple of the L1 cacheline size. */
#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES)
#endif

/*
 * Cacheline size used for SMP alignment decisions; defaults to the
 * L1 cacheline size unless the arch's asm/cache.h overrides it.
 */
#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * Mark mostly-read data so the linker can group it in its own
 * .data.read_mostly section, away from frequently-written data
 * (reduces false sharing).  Only arches whose linker scripts place
 * that section define it to anything; elsewhere it is a no-op.
 */
#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#else
#define __read_mostly
#endif

/* Unconditional alignment to the SMP cacheline size. */
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

/* Cacheline alignment only where it matters, i.e. on SMP builds. */
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

/*
 * Cacheline-align and additionally place the object in the dedicated
 * .data.cacheline_aligned section so such objects can be packed
 * together by the linker.
 */
#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data.cacheline_aligned")))
#endif /* __cacheline_aligned */

/* Section-placed cacheline alignment, SMP builds only. */
#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

/*
 * The maximum alignment needed for some critical structures
 * These could be inter-node cacheline sizes/L3 cacheline
 * size etc.  Define this in asm/cache.h for your arch
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

/*
 * Align to the internode (e.g. L3) cacheline on SMP; expands to
 * nothing on UP builds where false sharing cannot occur.
 */
#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

#endif /* __LINUX_CACHE_H */
|