Commit 57f26649 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc: Use gas sections for arranging exception vectors

Use assembler sections of fixed size and location to arrange the 64-bit
Book3S exception vector code (64-bit Book3E also uses it in head_64.S
for 0x0..0x100).

This allows better flexibility in arranging exception code and hiding
unimportant details behind macros.

Gas sections can be a bit painful to use this way, mainly because the
assembler does not know where they will finally be linked. Taking
absolute addresses, for example, requires a bit of trickery, but it can
mostly be hidden behind macros.
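For illustration, the trick underneath those macros looks roughly like this;
the real definitions are in asm/head-64.h (collapsed in this diff), so the
names linked_at, section_base and my_handler below are placeholders, not the
kernel's:

        .section ".head.text.real_vectors","ax",@progbits
linked_at = 0x100                /* address the linker script will assign */
section_base:                    /* gas itself assembles the section at 0 */

my_handler:
        nop

/* link-time constant: offset within the section plus its link address */
my_handler_abs = my_handler - section_base + linked_at

        .section ".text","ax",@progbits
        li      r10,0                    /* stand-in for the PACAKBASE load */
        ori     r10,r10,my_handler_abs@l /* low 16 bits, as LOAD_HANDLER does */

The LOAD_HANDLER and ABS_ADDR / FIXED_SYMBOL_ABS_ADDR changes in the hunks
below are this same shape, just wrapped in macros.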

Generated code is mostly the same except for locations, offsets, and alignments.

The "+ 0x2" is only required for the trap number / kvm exit number,
which gets loaded as a constant into a register.

Previously, code also used + 0x2 for label names, but we have since changed
to using "H" to distinguish the HV case. Remove the last vestiges of
that.

__after_prom_start takes the absolute address of a label in another
fixed section. Newer toolchains seem to compile this okay, but older
ones do not. FIXED_SYMBOL_ABS_ADDR is more foolproof; it just takes an
additional line to define.
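The pattern, as it appears for __run_at_load in the head_64.S hunks below, is
just that one extra line next to the label, plus FIXED_SYMBOL_ABS_ADDR at the
use site:

__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load)
        .long   0x72756e30      /* "run0" -- relocate to 0 by default */

        /* ... later, in __after_prom_start: */
        lwz     r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)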
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 573819e3
@@ -91,7 +91,7 @@
  */
 #define LOAD_HANDLER(reg, label)                                        \
         ld      reg,PACAKBASE(r13);     /* get high part of &label */   \
-        ori     reg,reg,((label)-_stext)@l; /* virt addr of handler ... */
+        ori     reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
 
 /* Exception register prefixes */
 #define EXC_HV  H
@@ -19,16 +19,68 @@
 #include <asm/head-64.h>
 
 /*
+ * There are a few constraints to be concerned with.
+ * - Real mode exceptions code/data must be located at their physical location.
+ * - Virtual mode exceptions must be mapped at their 0xc000... location.
+ * - Fixed location code must not call directly beyond the __end_interrupts
+ *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
+ *   must be used.
+ * - LOAD_HANDLER targets must be within first 64K of physical 0 /
+ *   virtual 0xc00...
+ * - Conditional branch targets must be within +/-32K of caller.
+ *
+ * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
+ * therefore don't have to run in physically located code or rfid to
+ * virtual mode kernel code. However on relocatable kernels they do have
+ * to branch to KERNELBASE offset because the rest of the kernel (outside
+ * the exception vectors) may be located elsewhere.
+ *
+ * Virtual exceptions correspond with physical, except their entry points
+ * are offset by 0xc000000000000000 and also tend to get an added 0x4000
+ * offset applied. Virtual exceptions are enabled with the Alternate
+ * Interrupt Location (AIL) bit set in the LPCR. However this does not
+ * guarantee they will be delivered virtually. Some conditions (see the ISA)
+ * cause exceptions to be delivered in real mode.
+ *
+ * It's impossible to receive interrupts below 0x300 via AIL.
+ *
+ * KVM: None of the virtual exceptions are from the guest. Anything that
+ * escalated to HV=1 from HV=0 is delivered via real mode handlers.
+ *
+ *
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x17ff : pSeries Interrupt prologs
- * 0x1800 - 0x4000 : interrupt support common interrupt prologs
- * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
- * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
+ * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
+ * 0x1900 - 0x3fff : Real mode trampolines
+ * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
+ * 0x5900 - 0x6fff : Relon mode trampolines
  * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - 0x8fff : Initial (CPU0) segment table
- * 0x9000 -       : Early init and support code
+ * 0x8000 -   .... : Common interrupt handlers, remaining early
+ *                   setup code, rest of kernel.
+ */
+OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
+OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x4000)
+OPEN_FIXED_SECTION(virt_vectors,        0x4000, 0x5900)
+OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)
+
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ * pseries and powernv need to keep the whole page from
+ * 0x7000 to 0x8000 free for use by the firmware
  */
+ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
+OPEN_TEXT_SECTION(0x8000)
+#else
+OPEN_TEXT_SECTION(0x7000)
+#endif
+
+USE_FIXED_SECTION(real_vectors)
+
+#define LOAD_SYSCALL_HANDLER(reg)                               \
+        ld      reg,PACAKBASE(r13);                             \
+        ori     reg,reg,(ABS_ADDR(system_call_common))@l;
 
 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
 #define SYSCALL_PSERIES_1                                       \
 BEGIN_FTR_SECTION                                               \
@@ -42,7 +94,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                   \
 #define SYSCALL_PSERIES_2_RFID                                  \
         mfspr   r12,SPRN_SRR1 ;                                 \
-        LOAD_HANDLER(r10, system_call_common) ;                 \
+        LOAD_SYSCALL_HANDLER(r10) ;                             \
         mtspr   SPRN_SRR0,r10 ;                                 \
         ld      r10,PACAKMSR(r13) ;                             \
         mtspr   SPRN_SRR1,r10 ;                                 \
@@ -63,7 +115,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                  \
  * is volatile across system calls.
  */
 #define SYSCALL_PSERIES_2_DIRECT                                \
-        LOAD_HANDLER(r12, system_call_common) ;                 \
+        LOAD_SYSCALL_HANDLER(r12) ;                             \
         mtctr   r12 ;                                           \
         mfspr   r12,SPRN_SRR1 ;                                 \
         li      r10,MSR_RI ;                                    \
@@ -86,7 +138,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                  \
  * Therefore any relative branches in this section must only
  * branch to labels in this section.
  */
-        . = 0x100
         .globl __start_interrupts
 __start_interrupts:
@@ -200,9 +251,6 @@ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
 #endif
 EXC_REAL_END(instruction_access_slb, 0x480, 0x500)
 
-/* We open code these as we can't have a ". = x" (even with
- * x = "." within a feature section
- */
 EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600)
         .globl hardware_interrupt_hv;
 hardware_interrupt_hv:
@@ -306,7 +354,6 @@ __EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0)
 EXC_REAL_NONE(0xfa0, 0x1200)
 
 #ifdef CONFIG_CBE_RAS
 EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300)
@@ -359,7 +406,6 @@ TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
 #else /* CONFIG_CBE_RAS */
 EXC_REAL_NONE(0x1800, 0x1900)
-        . = 0x1800
 #endif
@@ -606,7 +652,13 @@ masked_##_H##interrupt:                                 \
         GET_SCRATCH0(r13);                              \
         ##_H##rfid;                                     \
         b       .
 
+/*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+ * cannot reach these if they are put there.
+ */
+USE_FIXED_SECTION(virt_trampolines)
 MASKED_INTERRUPT()
 MASKED_INTERRUPT(H)
@@ -620,6 +672,7 @@ masked_##_H##interrupt:                                 \
  * in the generated frame has EE set to 1 or the exception
  * handler will not properly re-enable them.
  */
+USE_TEXT_SECTION()
 _GLOBAL(__replay_interrupt)
         /* We are going to jump to the exception common code which
          * will retrieve various register values from the PACA which
@@ -862,7 +915,7 @@ EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700)
 EXC_VIRT_NONE(0x5800, 0x5900)
 
-TRAMP_REAL_BEGIN(ppc64_runlatch_on_trampoline)
+EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
         b       __ppc64_runlatch_on
 
 /*
@@ -1070,6 +1123,7 @@ __TRAMP_REAL_VIRT_OOL(vsx_unavailable, 0xf40)
 __TRAMP_REAL_VIRT_OOL(facility_unavailable, 0xf60)
 __TRAMP_REAL_VIRT_OOL_HV(h_facility_unavailable, 0xf80)
 
+USE_FIXED_SECTION(virt_trampolines)
 /*
  * The __end_interrupts marker must be past the out-of-line (OOL)
  * handlers, so that they are copied to real address 0x100 when running
@@ -1080,21 +1134,7 @@ __TRAMP_REAL_VIRT_OOL_HV(h_facility_unavailable, 0xf80)
         .align  7
         .globl  __end_interrupts
 __end_interrupts:
+DEFINE_FIXED_SYMBOL(__end_interrupts)
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
-        .= 0x7000
-        .globl fwnmi_data_area
-fwnmi_data_area:
-
-        /* pseries and powernv need to keep the whole page from
-         * 0x7000 to 0x8000 free for use by the firmware
-         */
-        . = 0x8000
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
 EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
 EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
@@ -1106,7 +1146,7 @@ EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
-EXC_COMMON_BEGIN(hmi_exception_early)
+TRAMP_REAL_BEGIN(hmi_exception_early)
         EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
         mr      r10,r1                  /* Save r1 */
         ld      r1,PACAEMERGSP(r13)     /* Use emergency stack */
@@ -1430,6 +1470,13 @@ TRAMP_REAL_BEGIN(power4_fixup_nap)
         blr
 #endif
 
+CLOSE_FIXED_SECTION(real_vectors);
+CLOSE_FIXED_SECTION(real_trampolines);
+CLOSE_FIXED_SECTION(virt_vectors);
+CLOSE_FIXED_SECTION(virt_trampolines);
+
+USE_TEXT_SECTION()
+
 /*
  * Hash table stuff
  */
@@ -28,6 +28,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
+#include <asm/head-64.h>
 #include <asm/asm-offsets.h>
 #include <asm/bug.h>
 #include <asm/cputable.h>
@@ -65,9 +66,14 @@
  * 2. The kernel is entered at __start
  */
 
-        .text
-        .globl  _stext
-_stext:
+OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
+USE_FIXED_SECTION(first_256B)
+        /*
+         * Offsets are relative from the start of fixed section, and
+         * first_256B starts at 0. Offsets are a bit easier to use here
+         * than the fixed section entry macros.
+         */
+        . = 0x0
 _GLOBAL(__start)
         /* NOP this out unconditionally */
 BEGIN_FTR_SECTION
@@ -104,6 +110,7 @@ __secondary_hold_acknowledge:
         . = 0x5c
         .globl  __run_at_load
 __run_at_load:
+DEFINE_FIXED_SYMBOL(__run_at_load)
         .long   0x72756e30      /* "run0" -- relocate to 0 by default */
 #endif
@@ -133,7 +140,7 @@ __secondary_hold:
         /* Tell the master cpu we're here */
         /* Relocation is off & we are located at an address less */
         /* than 0x100, so only need to grab low order offset.    */
-        std     r24,__secondary_hold_acknowledge-_stext(0)
+        std     r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
         sync
 
         li      r26,0
@@ -141,7 +148,7 @@ __secondary_hold:
         tovirt(r26,r26)
 #endif
         /* All secondary cpus wait here until told to start. */
-100:    ld      r12,__secondary_hold_spinloop-_stext(r26)
+100:    ld      r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
         cmpdi   0,r12,0
         beq     100b
@@ -166,12 +173,13 @@ __secondary_hold:
 #else
         BUG_OPCODE
 #endif
+CLOSE_FIXED_SECTION(first_256B)
 
 /* This value is used to mark exception frames on the stack. */
         .section ".toc","aw"
 exception_marker:
         .tc     ID_72656773_68657265[TC],0x7265677368657265
-        .text
+        .previous
/* /*
* On server, we include the exception vectors code here as it * On server, we include the exception vectors code here as it
...@@ -180,8 +188,12 @@ exception_marker: ...@@ -180,8 +188,12 @@ exception_marker:
*/ */
#ifdef CONFIG_PPC_BOOK3S #ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S" #include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif #endif
USE_TEXT_SECTION()
#ifdef CONFIG_PPC_BOOK3E #ifdef CONFIG_PPC_BOOK3E
/* /*
* The booting_thread_hwid holds the thread id we want to boot in cpu * The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -558,7 +570,7 @@ __after_prom_start:
 #if defined(CONFIG_PPC_BOOK3E)
         tovirt(r26,r26)         /* on booke, we already run at PAGE_OFFSET */
 #endif
-        lwz     r7,__run_at_load-_stext(r26)
+        lwz     r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
 #if defined(CONFIG_PPC_BOOK3E)
         tophys(r26,r26)
 #endif
@@ -601,7 +613,7 @@ __after_prom_start:
 #if defined(CONFIG_PPC_BOOK3E)
         tovirt(r26,r26)         /* on booke, we already run at PAGE_OFFSET */
 #endif
-        lwz     r7,__run_at_load-_stext(r26)
+        lwz     r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
         cmplwi  cr0,r7,1
         bne     3f
@@ -611,19 +623,21 @@ __after_prom_start:
         sub     r5,r5,r11
 #else
         /* just copy interrupts */
-        LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+        LOAD_REG_IMMEDIATE(r5, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
 #endif
         b       5f
 3:
 #endif
-        lis     r5,(copy_to_here - _stext)@ha   /* # bytes of memory to copy */
-        addi    r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
+
+        lis     r5,(ABS_ADDR(copy_to_here))@ha
+        addi    r5,r5,(ABS_ADDR(copy_to_here))@l
 
         bl      copy_and_flush          /* copy the first n bytes        */
                                         /* this includes the code being  */
                                         /* executed here.                */
-        addis   r8,r3,(4f - _stext)@ha  /* Jump to the copy of this code */
-        addi    r12,r8,(4f - _stext)@l  /* that we just made */
+        /* Jump to the copy of this code that we just made */
+        addis   r8,r3,(ABS_ADDR(4f))@ha
+        addi    r12,r8,(ABS_ADDR(4f))@l
         mtctr   r12
         bctr
@@ -635,8 +649,8 @@ p_end: .llong _end - copy_to_here
  * Now copy the rest of the kernel up to _end, add
  * _end - copy_to_here to the copy limit and run again.
  */
-        addis   r8,r26,(p_end - _stext)@ha
-        ld      r8,(p_end - _stext)@l(r8)
+        addis   r8,r26,(ABS_ADDR(p_end))@ha
+        ld      r8,(ABS_ADDR(p_end))@l(r8)
         add     r5,r5,r8
 5:      bl      copy_and_flush          /* copy the rest */
@@ -44,11 +44,58 @@ SECTIONS
          * Text, read only data and other permanent read-only sections
          */
 
-        /* Text and gots */
+        _text = .;
+        _stext = .;
+
+        /*
+         * Head text.
+         * This needs to be in its own output section to avoid ld placing
+         * branch trampoline stubs randomly throughout the fixed sections,
+         * which it will do (even if the branch comes from another section)
+         * in order to optimize stub generation.
+         */
+        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
+#ifdef CONFIG_PPC64
+                KEEP(*(.head.text.first_256B));
+#ifdef CONFIG_PPC_BOOK3E
+# define END_FIXED      0x100
+#else
+                KEEP(*(.head.text.real_vectors));
+                *(.head.text.real_trampolines);
+                KEEP(*(.head.text.virt_vectors));
+                *(.head.text.virt_trampolines);
+# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+                KEEP(*(.head.data.fwnmi_page));
+# define END_FIXED      0x8000
+# else
+# define END_FIXED      0x7000
+# endif
+#endif
+                ASSERT((. == END_FIXED), "vmlinux.lds.S: fixed section overflow error");
+#else /* !CONFIG_PPC64 */
+                HEAD_TEXT
+#endif
+        } :kernel
+
+        /*
+         * If the build dies here, it's likely code in head_64.S is referencing
+         * labels it can't reach, and the linker inserting stubs without the
+         * assembler's knowledge. To debug, remove the above assert and
+         * rebuild. Look for branch stubs in the fixed section region.
+         *
+         * Linker stub generation could be allowed in "trampoline"
+         * sections if absolutely necessary, but this would require
+         * some rework of the fixed sections. Before resorting to this,
+         * consider references that have sufficient addressing range,
+         * (e.g., hand coded trampolines) so the linker does not have
+         * to add stubs.
+         *
+         * Linker stubs at the top of the main text section are currently not
+         * detected, and will result in a crash at boot due to offsets being
+         * wrong.
+         */
         .text : AT(ADDR(.text) - LOAD_OFFSET) {
                 ALIGN_FUNCTION();
-                HEAD_TEXT
-                _text = .;
                 /* careful! __ftr_alt_* sections need to be close to .text */
                 *(.text .fixup __ftr_alt_* .ref.text)
                 SCHED_TEXT