commit 5b11f1cee5
Both on 32 and 64 bits, we copy all the way up to the end of bss, except
that on 64 bits there is a hack to avoid copying on top of the page tables.
There is no point in copying bss at all, especially since we are just about
to zero it all anyway.

To clean up and unify the handling, we now do:

  - copy from startup_32 to _bss.
  - zero from _bss to _ebss.
  - the _ebss symbol is aligned to an 8-byte boundary.
  - the page tables are moved to a separate section.

Use _bss as the copy endpoint since _edata may be misaligned.

[ Impact: cleanup, trivial performance improvement ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
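For reference, the copy/zero scheme described above can be realized in the
64-bit startup path roughly as sketched below. This is an illustrative
sketch only, not part of this commit's listing; in particular, the use of
%rbx as the relocated load address is an assumption of the sketch.

	/*
	 * Sketch: copy startup_32.._bss to the relocation target, then zero
	 * _bss.._ebss.  Since startup_32 is linked at address 0, $_bss is
	 * also the byte length of the region to copy.
	 */
	leaq	(_bss-8)(%rip), %rsi	/* last quadword of the source image */
	leaq	(_bss-8)(%rbx), %rdi	/* last quadword of the destination */
	movq	$_bss, %rcx
	shrq	$3, %rcx		/* quadword count; _bss is cache-line aligned */
	std
	rep	movsq			/* copy backwards so overlap is safe */
	cld

	/* (execution would then continue from the relocated copy) */

	xorl	%eax, %eax		/* zero from _bss to _ebss; the ALIGN(8)  */
	leaq	_bss(%rip), %rdi	/* before _ebss means the count is a whole */
	leaq	_ebss(%rip), %rcx	/* number of quadwords                     */
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq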
66 lines
1000 B
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#undef i386

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
OUTPUT_ARCH(i386:x86-64)
ENTRY(startup_64)
#else
OUTPUT_ARCH(i386)
ENTRY(startup_32)
#endif

SECTIONS
{
	/* Be careful parts of head_64.S assume startup_32 is at
	 * address 0.
	 */
	. = 0;
	.text.head : {
		_head = . ;
		*(.text.head)
		_ehead = . ;
	}
	.rodata.compressed : {
		*(.rodata.compressed)
	}
	.text : {
		_text = .;	/* Text */
		*(.text)
		*(.text.*)
		_etext = . ;
	}
	.rodata : {
		_rodata = . ;
		*(.rodata)	/* read-only data */
		*(.rodata.*)
		_erodata = . ;
	}
	.data : {
		_data = . ;
		*(.data)
		*(.data.*)
		_edata = . ;
	}
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.bss : {
		_bss = . ;
		*(.bss)
		*(.bss.*)
		*(COMMON)
		. = ALIGN(8);	/* For convenience during zeroing */
		_ebss = .;
	}
#ifdef CONFIG_X86_64
	. = ALIGN(PAGE_SIZE);
	.pgtable : {
		_pgtable = . ;
		*(.pgtable)
		_epgtable = . ;
	}
#endif
	_end = .;
}
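With .pgtable split out, the 64-bit startup code can reserve its page-table
space as an ordinary nobits section instead of overlaying it on the tail of
bss. A sketch of such a reservation follows; the label and the number of
pages are illustrative, not taken from this listing.

	/* Reserve page-table space in the dedicated .pgtable output section */
	.section ".pgtable", "a", @nobits
	.balign 4096
pgtable:
	.fill 6*4096, 1, 0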