Commit d6905849 authored by Ard Biesheuvel

ARM: assembler: define a Kconfig symbol for group relocation support

Nathan reports the group relocations go out of range in pathological
cases such as allyesconfig kernels, which have little chance of actually
booting but are still used in validation.

So add a Kconfig symbol for this feature, and make it depend on
!COMPILE_TEST.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
parent 8b806b82
@@ -128,7 +128,7 @@ config ARM
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
 	select THREAD_INFO_IN_TASK
-	select HAVE_ARCH_VMAP_STACK if MMU && (!LD_IS_LLD || LLD_VERSION >= 140000)
+	select HAVE_ARCH_VMAP_STACK if MMU && ARM_HAS_GROUP_RELOCS
 	select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M
 	# Above selects are sorted alphabetically; please add new ones
 	# according to that. Thanks.
@@ -140,6 +140,17 @@ config ARM
 	  Europe. There is an ARM Linux project with a web page at
 	  <http://www.arm.linux.org.uk/>.
 
+config ARM_HAS_GROUP_RELOCS
+	def_bool y
+	depends on !LD_IS_LLD || LLD_VERSION >= 140000
+	depends on !COMPILE_TEST
+	help
+	  Whether or not to use R_ARM_ALU_PC_Gn or R_ARM_LDR_PC_Gn group
+	  relocations, which have been around for a long time, but were not
+	  supported in LLD until version 14. The combined range is -/+ 256 MiB,
+	  which is usually sufficient, but not for allyesconfig, so we disable
+	  this feature when doing compile testing.
+
 config ARM_HAS_SG_CHAIN
 	bool
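Note: the -/+ 256 MiB figure in the help text above follows from how the group relocation sequence is composed: two 8-bit rotated ALU immediates (the G0 and G1 chunks) plus the 12-bit offset field of an LDR (the G2 residual) cover 8 + 8 + 12 = 28 contiguous bits, i.e. 2^28 bytes. A minimal user-space sketch of that bound (illustrative only; the function name is not a kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Guaranteed reach of an R_ARM_ALU_PC_G0_NC + R_ARM_ALU_PC_G1_NC +
 * R_ARM_LDR_PC_G2 sequence: two 8-bit chunks plus a 12-bit LDR offset,
 * i.e. 28 bits of place-relative magnitude.
 */
static int fits_group_sequence(int32_t offset)
{
	uint32_t mag = offset < 0 ? -(uint32_t)offset : (uint32_t)offset;

	return mag < (1u << 28);		/* 2^28 bytes == 256 MiB */
}

int main(void)
{
	printf("%d\n", fits_group_sequence(200 << 20));	/* 200 MiB -> 1 */
	printf("%d\n", fits_group_sequence(300 << 20));	/* 300 MiB -> 0 */
	return 0;
}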
@@ -656,8 +656,8 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.macro		__ldst_va, op, reg, tmp, sym, cond
 #if __LINUX_ARM_ARCH__ >= 7 || \
-    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
-    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
 	mov_l		\tmp, \sym, \cond
 	\op\cond	\reg, [\tmp]
 #else
@@ -716,8 +716,8 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
  */
 	.macro		ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
 #if __LINUX_ARM_ARCH__ >= 7 || \
-    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
-    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
 	this_cpu_offset \t1
 	mov_l		\t2, \sym
 	ldr		\rd, [\t1, \t2]
@@ -37,8 +37,8 @@ static inline __attribute_const__ struct task_struct *get_current(void)
 #ifdef CONFIG_CPU_V6
 	    "1: \n\t"
 	    " .subsection 1 \n\t"
-#if !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) && \
-    !(defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+    !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
 	    "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
 	    " b 1b \n\t"
 #else
@@ -55,8 +55,8 @@ static inline __attribute_const__ struct task_struct *get_current(void)
 #endif
 	    : "=r"(cur));
 #elif __LINUX_ARM_ARCH__ >= 7 || \
-     (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
-     (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+     !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+     (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
 	cur = __current;
 #else
 	asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
@@ -38,8 +38,8 @@ static inline unsigned long __my_cpu_offset(void)
 #ifdef CONFIG_CPU_V6
 	    "1: \n\t"
 	    " .subsection 1 \n\t"
-#if !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) && \
-    !(defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+    !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
 	    "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
 	    " b 1b \n\t"
 #else
@@ -68,6 +68,7 @@ bool module_exit_section(const char *name)
 	       strstarts(name, ".ARM.exidx.exit");
 }
 
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
 /*
  * This implements the partitioning algorithm for group relocations as
  * documented in the ARM AArch32 ELF psABI (IHI 0044).
@@ -103,6 +104,7 @@ static u32 get_group_rem(u32 group, u32 *offset)
 	} while (group--);
 	return shift;
 }
+#endif
 
 int
 apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
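Note: the partitioning referred to in the comment above works by repeatedly peeling off the most significant 8-bit chunk of the remaining value, aligned down to an even bit position so it can be encoded as a rotated ARM ALU immediate; whatever is left after the G0 and G1 chunks must fit the 12-bit LDR offset field. A stand-alone sketch of that idea (illustrative only, with made-up helper names; it is not get_group_rem() verbatim):

#include <stdint.h>
#include <stdio.h>

/* Most significant set bit position; caller must not pass zero. */
static unsigned int fls32(uint32_t val)
{
	unsigned int pos = 0;

	while (val >>= 1)
		pos++;
	return pos;
}

/*
 * Remove the most significant 8-bit, even-aligned chunk from *val and
 * return it. 'shift' (always even) records how far below bit 31 the
 * chunk's mask starts, which determines the rotate field when the chunk
 * is encoded as an ARM ALU immediate.
 */
static uint32_t take_chunk(uint32_t *val, uint32_t *shift)
{
	uint32_t chunk;

	*shift = *val ? (31 - fls32(*val)) & ~1u : 0;
	chunk = *val & (0xff000000u >> *shift);
	*val &= ~chunk;
	return chunk;
}

int main(void)
{
	uint32_t offset = 0x01234567, s0, s1;
	uint32_t g0 = take_chunk(&offset, &s0);	/* G0 chunk */
	uint32_t g1 = take_chunk(&offset, &s1);	/* G1 chunk */

	/* The residual must fit the 12-bit LDR offset for R_ARM_LDR_PC_G2. */
	printf("G0=%#x@%u G1=%#x@%u residual=%#x (fits imm12: %d)\n",
	       g0, s0, g1, s1, offset, offset < 0x1000);
	return 0;
}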
@@ -118,7 +120,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	unsigned long loc;
 	Elf32_Sym *sym;
 	const char *symname;
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
 	u32 shift, group = 1;
+#endif
 	s32 offset;
 	u32 tmp;
 #ifdef CONFIG_THUMB2_KERNEL
@@ -249,6 +253,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		*(u32 *)loc = __opcode_to_mem_arm(tmp);
 		break;
 
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
 	case R_ARM_ALU_PC_G0_NC:
 		group = 0;
 		fallthrough;
@@ -296,7 +301,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		}
 		*(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
 		break;
-
+#endif
 #ifdef CONFIG_THUMB2_KERNEL
 	case R_ARM_THM_CALL:
 	case R_ARM_THM_JUMP24:
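Note: the "(tmp & ~0xfff) | offset" store in the R_ARM_LDR_PC_G2 hunk above rewrites only the 12-bit immediate field of the LDR encoding; a complete relocation handler also has to pick the LDR's U (add/subtract) bit from the sign of the residual. A minimal sketch of just the field patch (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * Replace the 12-bit immediate field (bits [11:0]) of an already encoded
 * ARM LDR/STR instruction, mirroring the "(tmp & ~0xfff) | offset" line
 * shown above. The residual is assumed to already fit in 12 bits.
 */
static uint32_t patch_ldr_imm12(uint32_t insn, uint32_t residual)
{
	return (insn & ~0xfffu) | (residual & 0xfffu);
}

int main(void)
{
	uint32_t ldr = 0xe59f0000;	/* ldr r0, [pc, #0] */

	printf("%#x\n", patch_ldr_imm12(ldr, 0x167));	/* 0xe59f0167 */
	return 0;
}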