Commit 122e2fa0 authored by Will Deacon, committed by Catalin Marinas

arm64: module: ensure instruction is little-endian before manipulation

Relocations that require an instruction immediate to be re-encoded must
ensure that the instruction pattern is represented in a little-endian
format for the manipulation code to work correctly.

This patch converts the loaded instruction into native endianness prior
to encoding and then converts back to little-endian byte order before
updating memory.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Tested-by: Matthew Leach <matthew.leach@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent dab7ea36
@@ -111,6 +111,9 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
 	u32 immlo, immhi, lomask, himask, mask;
 	int shift;
 
+	/* The instruction stream is always little endian. */
+	insn = le32_to_cpu(insn);
+
 	switch (type) {
 	case INSN_IMM_MOVNZ:
 		/*
@@ -179,7 +182,7 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
 	insn &= ~(mask << shift);
 	insn |= (imm & mask) << shift;
 
-	return insn;
+	return cpu_to_le32(insn);
 }
 
 static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
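
The pattern the commit message describes (load the raw instruction word, convert it to native byte order, rewrite the immediate field, then convert back to little-endian before storing) can be illustrated with a minimal userspace sketch. This is not part of the patch: it assumes glibc's le32toh()/htole32() from <endian.h> as stand-ins for the kernel's le32_to_cpu()/cpu_to_le32(), and the helper patch_movz_imm16() and the MOVZ example are hypothetical, chosen only to show the round trip.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Patch the 16-bit immediate of an AArch64 MOVZ instruction stored at
 * 'place'. The A64 instruction stream is always little-endian, so the
 * word is converted to native byte order before the bit manipulation
 * and converted back before being written out.
 */
static void patch_movz_imm16(void *place, uint16_t imm)
{
	uint32_t insn;

	memcpy(&insn, place, sizeof(insn));	/* raw little-endian bytes */
	insn = le32toh(insn);			/* to native byte order */

	insn &= ~(0xffffu << 5);		/* clear imm16 field (bits [20:5]) */
	insn |= (uint32_t)imm << 5;		/* insert new immediate */

	insn = htole32(insn);			/* back to little-endian */
	memcpy(place, &insn, sizeof(insn));
}

int main(void)
{
	/* MOVZ x0, #0x0, stored as little-endian bytes of 0xd2800000 */
	uint8_t insn_bytes[4] = { 0x00, 0x00, 0x80, 0xd2 };

	patch_movz_imm16(insn_bytes, 0x1234);
	printf("%02x %02x %02x %02x\n",
	       insn_bytes[0], insn_bytes[1], insn_bytes[2], insn_bytes[3]);
	return 0;
}

On a little-endian host the two conversions compile to no-ops, which is why the missing conversions in the original code went unnoticed there; on a big-endian kernel they are what keep the stored instruction bytes correct.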