Commit c8ebf64e authored by Jessica Yu, committed by Will Deacon

arm64/module: use plt section indices for relocations

Instead of saving a pointer to the .plt and .init.plt sections to apply
plt-based relocations, save and use their section indices instead.

The mod->arch.{core,init}.plt pointers were problematic for livepatch
because they pointed within temporary section headers (provided by the
module loader via info->sechdrs) that would be freed after module load.
Since livepatch modules may need to apply relocations post-module-load
(for example, to patch a module that is loaded later), using section
indices to offset into the section headers (instead of accessing them
through a saved pointer) allows livepatch modules on arm64 to pass in
their own copy of the section headers to apply_relocate_add() to apply
delayed relocations.
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Signed-off-by: Jessica Yu <jeyu@kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent c55191e9
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#ifdef CONFIG_ARM64_MODULE_PLTS #ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec { struct mod_plt_sec {
struct elf64_shdr *plt; int plt_shndx;
int plt_num_entries; int plt_num_entries;
int plt_max_entries; int plt_max_entries;
}; };
...@@ -36,10 +36,12 @@ struct mod_arch_specific { ...@@ -36,10 +36,12 @@ struct mod_arch_specific {
}; };
#endif #endif
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym); Elf64_Sym *sym);
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base; extern u64 module_alloc_base;
......
...@@ -16,12 +16,13 @@ static bool in_init(const struct module *mod, void *loc) ...@@ -16,12 +16,13 @@ static bool in_init(const struct module *mod, void *loc)
return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size; return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
} }
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym) Elf64_Sym *sym)
{ {
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init; &mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr; struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries; int i = pltsec->plt_num_entries;
u64 val = sym->st_value + rela->r_addend; u64 val = sym->st_value + rela->r_addend;
...@@ -43,11 +44,12 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, ...@@ -43,11 +44,12 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
} }
#ifdef CONFIG_ARM64_ERRATUM_843419 #ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, u64 val)
{ {
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init; &mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr; struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries++; int i = pltsec->plt_num_entries++;
u32 mov0, mov1, mov2, br; u32 mov0, mov1, mov2, br;
int rd; int rd;
...@@ -202,7 +204,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -202,7 +204,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned long core_plts = 0; unsigned long core_plts = 0;
unsigned long init_plts = 0; unsigned long init_plts = 0;
Elf64_Sym *syms = NULL; Elf64_Sym *syms = NULL;
Elf_Shdr *tramp = NULL; Elf_Shdr *pltsec, *tramp = NULL;
int i; int i;
/* /*
...@@ -211,9 +213,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -211,9 +213,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
*/ */
for (i = 0; i < ehdr->e_shnum; i++) { for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.core.plt = sechdrs + i; mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
mod->arch.init.plt = sechdrs + i; mod->arch.init.plt_shndx = i;
else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(secstrings + sechdrs[i].sh_name, !strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline")) ".text.ftrace_trampoline"))
...@@ -222,7 +224,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -222,7 +224,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
syms = (Elf64_Sym *)sechdrs[i].sh_addr; syms = (Elf64_Sym *)sechdrs[i].sh_addr;
} }
if (!mod->arch.core.plt || !mod->arch.init.plt) { if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name); pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC; return -ENOEXEC;
} }
...@@ -254,17 +256,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -254,17 +256,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
sechdrs[i].sh_info, dstsec); sechdrs[i].sh_info, dstsec);
} }
mod->arch.core.plt->sh_type = SHT_NOBITS; pltsec = sechdrs + mod->arch.core.plt_shndx;
mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; pltsec->sh_type = SHT_NOBITS;
mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry); pltsec->sh_addralign = L1_CACHE_BYTES;
pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
mod->arch.core.plt_num_entries = 0; mod->arch.core.plt_num_entries = 0;
mod->arch.core.plt_max_entries = core_plts; mod->arch.core.plt_max_entries = core_plts;
mod->arch.init.plt->sh_type = SHT_NOBITS; pltsec = sechdrs + mod->arch.init.plt_shndx;
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; pltsec->sh_type = SHT_NOBITS;
mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES; pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry); pltsec->sh_addralign = L1_CACHE_BYTES;
pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
mod->arch.init.plt_num_entries = 0; mod->arch.init.plt_num_entries = 0;
mod->arch.init.plt_max_entries = init_plts; mod->arch.init.plt_max_entries = init_plts;
......
...@@ -198,7 +198,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, ...@@ -198,7 +198,8 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
return 0; return 0;
} }
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
__le32 *place, u64 val)
{ {
u32 insn; u32 insn;
...@@ -215,7 +216,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) ...@@ -215,7 +216,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
insn &= ~BIT(31); insn &= ~BIT(31);
} else { } else {
/* out of range for ADR -> emit a veneer */ /* out of range for ADR -> emit a veneer */
val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff); val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
if (!val) if (!val)
return -ENOEXEC; return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val, insn = aarch64_insn_gen_branch_imm((u64)place, val,
...@@ -368,7 +369,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -368,7 +369,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_ADR_PREL_PG_HI21_NC: case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false; overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21: case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, loc, val); ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE) if (ovf && ovf != -ERANGE)
return ovf; return ovf;
break; break;
...@@ -413,7 +414,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -413,7 +414,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) { ovf == -ERANGE) {
val = module_emit_plt_entry(me, loc, &rel[i], sym); val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val) if (!val)
return -ENOEXEC; return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment