Commit c62e4320 authored by Jan Beulich, committed by Ingo Molnar

x86: Fix build with older binutils and consolidate linker script

binutils prior to 2.17 can't deal with the currently possible
situation of a new segment following the per-CPU segment, but
that new segment being empty - objcopy misplaces the .bss (and
perhaps also the .brk) sections outside of any segment.

However, the current ordering of sections really just appears
to be the effect of cumulative unrelated changes; re-ordering
things makes it easy to guarantee that the segment following
the per-CPU one is non-empty, and at the same time eliminates
the need for the bogus data.init2 segment.

Once touching this code, also use the various data section
helper macros from include/asm-generic/vmlinux.lds.h.

-v2: fix !SMP builds.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: <sam@ravnborg.org>
LKML-Reference: <4A94085D02000078000119A5@vpn.id2.novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a6a06f7b
...@@ -46,11 +46,10 @@ PHDRS { ...@@ -46,11 +46,10 @@ PHDRS {
data PT_LOAD FLAGS(7); /* RWE */ data PT_LOAD FLAGS(7); /* RWE */
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
user PT_LOAD FLAGS(7); /* RWE */ user PT_LOAD FLAGS(7); /* RWE */
data.init PT_LOAD FLAGS(7); /* RWE */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
percpu PT_LOAD FLAGS(7); /* RWE */ percpu PT_LOAD FLAGS(7); /* RWE */
#endif #endif
data.init2 PT_LOAD FLAGS(7); /* RWE */ init PT_LOAD FLAGS(7); /* RWE */
#endif #endif
note PT_NOTE FLAGS(0); /* ___ */ note PT_NOTE FLAGS(0); /* ___ */
} }
...@@ -103,65 +102,43 @@ SECTIONS ...@@ -103,65 +102,43 @@ SECTIONS
__stop___ex_table = .; __stop___ex_table = .;
} :text = 0x9090 } :text = 0x9090
RODATA RO_DATA(PAGE_SIZE)
/* Data */ /* Data */
. = ALIGN(PAGE_SIZE);
.data : AT(ADDR(.data) - LOAD_OFFSET) { .data : AT(ADDR(.data) - LOAD_OFFSET) {
/* Start of data section */ /* Start of data section */
_sdata = .; _sdata = .;
DATA_DATA
CONSTRUCTORS /* init_task */
} :data INIT_TASK_DATA(THREAD_SIZE)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* 32 bit has nosave before _edata */ /* 32 bit has nosave before _edata */
. = ALIGN(PAGE_SIZE); NOSAVE_DATA
.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
__nosave_begin = .;
*(.data.nosave)
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
}
#endif #endif
. = ALIGN(PAGE_SIZE); PAGE_ALIGNED_DATA(PAGE_SIZE)
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
*(.data.page_aligned)
*(.data.idt) *(.data.idt)
}
#ifdef CONFIG_X86_32 CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
. = ALIGN(32);
#else
. = ALIGN(PAGE_SIZE);
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
.data.cacheline_aligned :
AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
*(.data.cacheline_aligned)
}
/* rarely changed data like cpu maps */ DATA_DATA
#ifdef CONFIG_X86_32 CONSTRUCTORS
. = ALIGN(32);
#else /* rarely changed data like cpu maps */
. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES); READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
#endif
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
*(.data.read_mostly)
/* End of data section */ /* End of data section */
_edata = .; _edata = .;
} } :data
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
#define VSYSCALL_ADDR (-10*1024*1024) #define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \ #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
SIZEOF(.data.read_mostly) + 4095) & ~(4095)) PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \ #define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
SIZEOF(.data.read_mostly) + 4095) & ~(4095)) PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
...@@ -227,35 +204,29 @@ SECTIONS ...@@ -227,35 +204,29 @@ SECTIONS
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
/* init_task */ /* Init code and data - will be freed after init */
. = ALIGN(THREAD_SIZE); . = ALIGN(PAGE_SIZE);
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
*(.data.init_task) __init_begin = .; /* paired with __init_end */
} }
#ifdef CONFIG_X86_64
:data.init
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
/* /*
* smp_locks might be freed after init * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* start/end must be page aligned * output PHDR, so the next output section - .init.text - should
* start another segment - init.
*/ */
. = ALIGN(PAGE_SIZE); PERCPU_VADDR(0, :percpu)
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { #endif
__smp_locks = .;
*(.smp_locks)
__smp_locks_end = .;
. = ALIGN(PAGE_SIZE);
}
/* Init code and data - will be freed after init */
. = ALIGN(PAGE_SIZE);
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
__init_begin = .; /* paired with __init_end */
_sinittext = .; _sinittext = .;
INIT_TEXT INIT_TEXT
_einittext = .; _einittext = .;
} }
#ifdef CONFIG_X86_64
:init
#endif
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA INIT_DATA
...@@ -326,17 +297,7 @@ SECTIONS ...@@ -326,17 +297,7 @@ SECTIONS
} }
#endif #endif
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - __data_nosave - should
* start another section data.init2. Also, pda should be at the head of
* percpu area. Preallocate it and define the percpu offset symbol
* so that it can be accessed as a percpu variable.
*/
. = ALIGN(PAGE_SIZE);
PERCPU_VADDR(0, :percpu)
#else
PERCPU(PAGE_SIZE) PERCPU(PAGE_SIZE)
#endif #endif
...@@ -347,15 +308,22 @@ SECTIONS ...@@ -347,15 +308,22 @@ SECTIONS
__init_end = .; __init_end = .;
} }
/*
* smp_locks might be freed after init
* start/end must be page aligned
*/
. = ALIGN(PAGE_SIZE);
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
__smp_locks_end = .;
. = ALIGN(PAGE_SIZE);
}
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
. = ALIGN(PAGE_SIZE); NOSAVE_DATA
__nosave_begin = .; }
*(.data.nosave)
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
} :data.init2
/* use another section data.init2, see PERCPU_VADDR() above */
#endif #endif
/* BSS */ /* BSS */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment