5fb7dc37dc
The per cpu data section contains two types of data: one set that is exclusively accessed by the local cpu, and another set that is per cpu but also shared by remote cpus. In the current kernel, these two sets are not clearly separated. This can cause the same data cacheline to be shared between the two sets, which results in unnecessary bouncing of the cacheline between cpus.

One way to fix the problem is to cacheline align the remotely accessed per cpu data, both at the beginning and at the end. Because of the padding at both ends, this would likely waste some memory, and the interface to achieve it is not clean.

This patch moves the remotely accessed per cpu data (currently marked as ____cacheline_aligned_in_smp) into a different section, where all the data elements are cacheline aligned. This cleanly separates the local-only data from the remotely accessed data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: <linux-arch@vger.kernel.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
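For context, the C-side interface that pairs with the new linker section is a per-cpu definition macro that places remotely accessed variables into .data.percpu.shared_aligned instead of .data.percpu. The following is a minimal sketch of that idea, simplified from the generic percpu headers of this era; the exact definitions vary by architecture and kernel version.

/*
 * Sketch only: shows how the two per-cpu input sections get populated.
 */

/* cpu-local per-cpu data goes into .data.percpu */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	__typeof__(type) per_cpu__##name

/*
 * Remotely accessed per-cpu data goes into .data.percpu.shared_aligned,
 * where each entry is cacheline aligned, so it never shares a cacheline
 * with the cpu-local entries above.
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.shared_aligned"))) \
	__typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp

The linker script below then gathers both input sections between __per_cpu_start and __per_cpu_end.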
192 lines | 3.7 KiB | ArmAsm
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/memory.h>

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	.text.head : {
		_stext = .;
		_sinittext = .;
		*(.text.head)
	}

	.init : { /* Init code and data */
		*(.init.text)
		_einittext = .;
		__proc_info_begin = .;
			*(.proc.info.init)
		__proc_info_end = .;
		__arch_info_begin = .;
			*(.arch.info.init)
		__arch_info_end = .;
		__tagtable_begin = .;
			*(.taglist.init)
		__tagtable_end = .;
		. = ALIGN(16);
		__setup_start = .;
			*(.init.setup)
		__setup_end = .;
		__early_begin = .;
			*(.early_param.init)
		__early_end = .;
		__initcall_start = .;
			INITCALLS
		__initcall_end = .;
		__con_initcall_start = .;
			*(.con_initcall.init)
		__con_initcall_end = .;
		__security_initcall_start = .;
			*(.security_initcall.init)
		__security_initcall_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
		. = ALIGN(32);
		__initramfs_start = .;
			usr/built-in.o(.init.ramfs)
		__initramfs_end = .;
#endif
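		/*
		 * Per-cpu data: cpu-local entries first, then the remotely
		 * accessed entries (each cacheline aligned) in their own
		 * .data.percpu.shared_aligned section (see commit message).
		 */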
		. = ALIGN(4096);
		__per_cpu_start = .;
			*(.data.percpu)
			*(.data.percpu.shared_aligned)
		__per_cpu_end = .;
#ifndef CONFIG_XIP_KERNEL
		__init_begin = _stext;
		*(.init.data)
		. = ALIGN(4096);
		__init_end = .;
#endif
	}

	/DISCARD/ : { /* Exit code and data */
		*(.exit.text)
		*(.exit.data)
		*(.exitcall.exit)
#ifndef CONFIG_MMU
		*(.fixup)
		*(__ex_table)
#endif
	}

	.text : { /* Real text segment */
		_text = .; /* Text and read-only data */
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
#ifdef CONFIG_MMU
			*(.fixup)
#endif
			*(.gnu.warning)
			*(.rodata)
			*(.rodata.*)
			*(.glue_7)
			*(.glue_7t)
		*(.got) /* Global offset table */
	}

	RODATA

	_etext = .; /* End of text and rodata section */

#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4); /* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
	. = ALIGN(THREAD_SIZE);
	__data_loc = .;
#endif

	.data : AT(__data_loc) {
		__data_start = .; /* address in memory */

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		*(.data.init_task)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(4096);
		__init_begin = .;
		*(.init.data)
		. = ALIGN(4096);
		__init_end = .;
#endif

		. = ALIGN(4096);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(4096);
		__nosave_end = .;

		/*
		 * then the cacheline aligned data
		 */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

		/*
		 * The exception fixup table (might need resorting at runtime)
		 */
		. = ALIGN(32);
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

	.bss : {
		__bss_start = .; /* BSS */
		*(.bss)
		*(COMMON)
		_end = .;
	}
	/* Stabs debugging sections. */
	.stab 0 : { *(.stab) }
	.stabstr 0 : { *(.stabstr) }
	.stab.excl 0 : { *(.stab.excl) }
	.stab.exclstr 0 : { *(.stab.exclstr) }
	.stab.index 0 : { *(.stab.index) }
	.stab.indexstr 0 : { *(.stab.indexstr) }
	.comment 0 : { *(.comment) }
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")