Commit fef7f2b2 authored by Marc Zyngier, committed by Will Deacon

arm64: alternative: Allow immediate branch as alternative instruction

Since all immediate branches are PC-relative on AArch64, these
instructions cannot be used as an alternative with the simplistic
approach we currently have (the immediate has been computed from
the .altinstr_replacement section, and ends up being completely off
if we insert it directly).

This patch handles the b and bl instructions in a different way,
using the insn framework to recompute the immediate and to generate
the right displacement.
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0978fb25
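
To illustrate the problem the commit describes: the imm26 field of an AArch64 b/bl encodes (target - pc) / 4, so a branch assembled in .altinstr_replacement resolves to the wrong address once the instruction is copied somewhere else. Below is a minimal user-space sketch of the fixup idea; branch_offset() and make_branch() are hypothetical stand-ins for the kernel's aarch64_insn_* API, and the decode expression deliberately mirrors get_branch_offset() in the diff.

	/*
	 * Hypothetical user-space sketch, not kernel code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* Decode the signed byte offset of a b/bl from its imm26 field. */
	static int32_t branch_offset(uint32_t insn)
	{
		int32_t imm = insn & 0x03ffffff;

		/* sign-extend imm26 and scale by 4 (one instruction = 4 bytes),
		 * relying on two's-complement shifts as the kernel does */
		return (imm << 6) >> 4;
	}

	/* Re-encode a b/bl located at 'pc' so that it branches to 'target'. */
	static uint32_t make_branch(uint32_t insn, uint64_t pc, uint64_t target)
	{
		uint32_t imm26 = ((uint32_t)((target - pc) >> 2)) & 0x03ffffff;

		return (insn & ~0x03ffffffu) | imm26;
	}

	int main(void)
	{
		uint64_t alt_pc  = 0x1000;	/* where the alternative was assembled */
		uint64_t orig_pc = 0x8000;	/* where it is actually patched in */
		uint32_t insn    = 0x14000004;	/* "b .+16", assembled at alt_pc */

		/* The branch really targets alt_pc + 16 = 0x1010 ... */
		uint64_t target = alt_pc + branch_offset(insn);

		/* ... but copied verbatim it would jump to orig_pc + 16 = 0x8010. */
		uint32_t fixed = make_branch(insn, orig_pc, target);

		printf("target 0x%llx, fixed insn 0x%08x, new offset %d\n",
		       (unsigned long long)target, fixed, (int)branch_offset(fixed));
		return 0;
	}

The re-encoded instruction carries a different displacement (-28656 bytes instead of +16) precisely because it moved; recomputing that displacement is what get_alt_insn() below does with aarch64_insn_gen_branch_imm().
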
@@ -24,6 +24,7 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/insn.h>
 #include <linux/stop_machine.h>
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -33,6 +34,48 @@ struct alt_region {
 	struct alt_instr *end;
 };
 
+/*
+ * Decode the imm field of a b/bl instruction, and return the byte
+ * offset as a signed value (so it can be used when computing a new
+ * branch target).
+ */
+static s32 get_branch_offset(u32 insn)
+{
+	s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+
+	/* sign-extend the immediate before turning it into a byte offset */
+	return (imm << 6) >> 4;
+}
+
+static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
+{
+	u32 insn;
+
+	aarch64_insn_read(altinsnptr, &insn);
+
+	/* Stop the world on instructions we don't support... */
+	BUG_ON(aarch64_insn_is_cbz(insn));
+	BUG_ON(aarch64_insn_is_cbnz(insn));
+	BUG_ON(aarch64_insn_is_bcond(insn));
+	/* ... and there is probably more. */
+
+	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+		enum aarch64_insn_branch_type type;
+		unsigned long target;
+
+		if (aarch64_insn_is_b(insn))
+			type = AARCH64_INSN_BRANCH_NOLINK;
+		else
+			type = AARCH64_INSN_BRANCH_LINK;
+
+		target = (unsigned long)altinsnptr + get_branch_offset(insn);
+		insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
+						   target, type);
+	}
+
+	return insn;
+}
+
 static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
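
A note on the arithmetic in get_branch_offset() above: shifting the 26-bit immediate left by 6 places its sign bit (bit 25) in bit 31 of the s32, and the arithmetic right shift by 4 then sign-extends while leaving the value multiplied by 4, converting an instruction count into a byte offset. A tiny standalone check of that identity (plain user-space C, relying on two's-complement shifts just as the kernel expression does):

	#include <assert.h>
	#include <stdint.h>

	static int32_t imm26_to_bytes(int32_t imm)
	{
		/* same expression as get_branch_offset() */
		return (imm << 6) >> 4;
	}

	int main(void)
	{
		assert(imm26_to_bytes(4) == 16);		/* b .+16 */
		assert(imm26_to_bytes(0x3fffffe) == -8);	/* b .-8  */
		return 0;
	}
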
@@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region)
 	u8 *origptr, *replptr;
 
 	for (alt = region->begin; alt < region->end; alt++) {
+		u32 insn;
+		int i;
+
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
-		BUG_ON(alt->alt_len > alt->orig_len);
+		BUG_ON(alt->alt_len != alt->orig_len);
 
 		pr_info_once("patching kernel code\n");
 
 		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
 		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-		memcpy(origptr, replptr, alt->alt_len);
+
+		for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
+			insn = get_alt_insn(origptr + i, replptr + i);
+			aarch64_insn_write(origptr + i, insn);
+		}
 
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + alt->alt_len));
 	}
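
With the memcpy() replaced by a per-instruction loop, every 32-bit word of the replacement gets a chance to be rewritten for its new address. A self-contained sketch of that loop shape, assuming a stub fixup_insn() in place of the kernel's get_alt_insn() and plain memcpy() in place of aarch64_insn_write():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint32_t fixup_insn(uint8_t *dst, uint8_t *src)
	{
		uint32_t insn;

		(void)dst;	/* a real fixup would re-encode b/bl against dst */
		memcpy(&insn, src, sizeof(insn));
		return insn;
	}

	static void patch_alternative(uint8_t *orig, uint8_t *repl, unsigned int len)
	{
		uint32_t insn;
		unsigned int i;

		/* walk one 32-bit instruction at a time, as the patch now does */
		for (i = 0; i < len; i += sizeof(insn)) {
			insn = fixup_insn(orig + i, repl + i);
			memcpy(orig + i, &insn, sizeof(insn));
		}
	}

	int main(void)
	{
		uint8_t orig[8] = { 0 };
		uint8_t repl[8] = { 0x1f, 0x20, 0x03, 0xd5,	/* nop */
				    0x1f, 0x20, 0x03, 0xd5 };	/* nop */

		patch_alternative(orig, repl, sizeof(repl));
		printf("first patched word: %02x %02x %02x %02x\n",
		       orig[0], orig[1], orig[2], orig[3]);
		return 0;
	}

The tightened BUG_ON(alt->alt_len != alt->orig_len) is what makes this lockstep walk safe: the original and replacement sequences must now be exactly the same length.
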