Commit b07de94d authored by Palmer Dabbelt

Merge patch series "Allow calls in alternatives"

Heiko Stuebner <heiko@sntech.de> says:

From: Heiko Stuebner <heiko.stuebner@vrull.eu>

This series is split out of my work on optimizing string functions
and provides the basics to:

- actually allow calls in alternatives
  Function calls use auipc + jalr pairs to reach targets anywhere within
  a 32-bit pc-relative range. Those offsets are resolved at build time,
  while the code still lives in the alternatives section, so once the
  alternative is patched in at its final location the pair points to the
  wrong address (see the sketch below).

  So, similar to arm64, the target addresses need to be updated when the
  alternative is applied.

  This is probably also helpful for other things needing more
  complex code in alternatives.
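
  As a rough illustration of the encoding involved (the helper below is
  hypothetical, for illustration only, and uses kernel-style s32/u32
  types; it is not a kernel API): an auipc + jalr pair splits a 32-bit
  pc-relative offset into a high U-type part and a low I-type part,
  where the high part pre-compensates for jalr sign-extending its
  12-bit immediate:

      /* illustration only, not the kernel helpers themselves */
      static void split_pcrel_offset(s32 offset, u32 *hi, u32 *lo)
      {
              *lo = offset & 0xfff;             /* sign-extended by jalr */
              *hi = (offset + 0x800) & ~0xfff;  /* what auipc adds to pc */
      }

  Because the offset is pc-relative, the same instruction pair must encode
  a different value once it runs at the patch site instead of at its
  original location in the alternatives section.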

For v2 I went on a bit of a cleanup spree for the general instruction
parsing that already existed: a number of places do their own instruction
parsing, and I tried to consolidate some of them.

Notably, the kvm parts still do their own parsing, but I had to stop somewhere :-)

* b4-shazam-merge:
  RISC-V: fix auipc-jalr addresses in patched alternatives
  RISC-V: add helpers for handling immediates in U-type and I-type pairs
  RISC-V: add rd reg parsing to insn.h header
  RISC-V: add U-type imm parsing to insn.h header
  RISC-V: kprobes: use central defined funct3 constants
  RISC-V: rename parse_asm.h to insn.h
  RISC-V: Move riscv_insn_is_* macros into a common header
  RISC-V: add auipc elements to parse_asm header
  RISC-V: add ebreak instructions to definitions
  RISC-V: detach funct-values from their offset
  RISC-V: add prefix to all constants/macros in parse_asm.h
  RISC-V: fix funct4 definition for c.jalr in parse_asm.h

Link: https://lore.kernel.org/r/20221223221332.4127602-1-heiko@sntech.de
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 4e1ce304 27c653c0
@@ -27,6 +27,9 @@ void __init apply_boot_alternatives(void);
 void __init apply_early_boot_alternatives(void);
 void apply_module_alternatives(void *start, size_t length);
+void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
+				   int patch_offset);
+
 struct alt_entry {
	void *old_ptr;		/* address of original instruction or data */
	void *alt_ptr;		/* address of replacement instruction or data */
...
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 SiFive
*/
#include <linux/bits.h>
/* The bit field of immediate value in I-type instruction */
#define I_IMM_SIGN_OPOFF 31
#define I_IMM_11_0_OPOFF 20
#define I_IMM_SIGN_OFF 12
#define I_IMM_11_0_OFF 0
#define I_IMM_11_0_MASK GENMASK(11, 0)
/* The bit field of immediate value in J-type instruction */
#define J_IMM_SIGN_OPOFF 31
#define J_IMM_10_1_OPOFF 21
#define J_IMM_11_OPOFF 20
#define J_IMM_19_12_OPOFF 12
#define J_IMM_SIGN_OFF 20
#define J_IMM_10_1_OFF 1
#define J_IMM_11_OFF 11
#define J_IMM_19_12_OFF 12
#define J_IMM_10_1_MASK GENMASK(9, 0)
#define J_IMM_11_MASK GENMASK(0, 0)
#define J_IMM_19_12_MASK GENMASK(7, 0)
/* The bit field of immediate value in B-type instruction */
#define B_IMM_SIGN_OPOFF 31
#define B_IMM_10_5_OPOFF 25
#define B_IMM_4_1_OPOFF 8
#define B_IMM_11_OPOFF 7
#define B_IMM_SIGN_OFF 12
#define B_IMM_10_5_OFF 5
#define B_IMM_4_1_OFF 1
#define B_IMM_11_OFF 11
#define B_IMM_10_5_MASK GENMASK(5, 0)
#define B_IMM_4_1_MASK GENMASK(3, 0)
#define B_IMM_11_MASK GENMASK(0, 0)
/* The register offset in RVG instruction */
#define RVG_RS1_OPOFF 15
#define RVG_RS2_OPOFF 20
#define RVG_RD_OPOFF 7
/* The bit field of immediate value in RVC J instruction */
#define RVC_J_IMM_SIGN_OPOFF 12
#define RVC_J_IMM_4_OPOFF 11
#define RVC_J_IMM_9_8_OPOFF 9
#define RVC_J_IMM_10_OPOFF 8
#define RVC_J_IMM_6_OPOFF 7
#define RVC_J_IMM_7_OPOFF 6
#define RVC_J_IMM_3_1_OPOFF 3
#define RVC_J_IMM_5_OPOFF 2
#define RVC_J_IMM_SIGN_OFF 11
#define RVC_J_IMM_4_OFF 4
#define RVC_J_IMM_9_8_OFF 8
#define RVC_J_IMM_10_OFF 10
#define RVC_J_IMM_6_OFF 6
#define RVC_J_IMM_7_OFF 7
#define RVC_J_IMM_3_1_OFF 1
#define RVC_J_IMM_5_OFF 5
#define RVC_J_IMM_4_MASK GENMASK(0, 0)
#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
#define RVC_J_IMM_10_MASK GENMASK(0, 0)
#define RVC_J_IMM_6_MASK GENMASK(0, 0)
#define RVC_J_IMM_7_MASK GENMASK(0, 0)
#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
#define RVC_J_IMM_5_MASK GENMASK(0, 0)
/* The bit field of immediate value in RVC B instruction */
#define RVC_B_IMM_SIGN_OPOFF 12
#define RVC_B_IMM_4_3_OPOFF 10
#define RVC_B_IMM_7_6_OPOFF 5
#define RVC_B_IMM_2_1_OPOFF 3
#define RVC_B_IMM_5_OPOFF 2
#define RVC_B_IMM_SIGN_OFF 8
#define RVC_B_IMM_4_3_OFF 3
#define RVC_B_IMM_7_6_OFF 6
#define RVC_B_IMM_2_1_OFF 1
#define RVC_B_IMM_5_OFF 5
#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
#define RVC_B_IMM_5_MASK GENMASK(0, 0)
/* The register offset in RVC op=C0 instruction */
#define RVC_C0_RS1_OPOFF 7
#define RVC_C0_RS2_OPOFF 2
#define RVC_C0_RD_OPOFF 2
/* The register offset in RVC op=C1 instruction */
#define RVC_C1_RS1_OPOFF 7
#define RVC_C1_RS2_OPOFF 2
#define RVC_C1_RD_OPOFF 7
/* The register offset in RVC op=C2 instruction */
#define RVC_C2_RS1_OPOFF 7
#define RVC_C2_RS2_OPOFF 2
#define RVC_C2_RD_OPOFF 7
/* parts of opcode for RVG*/
#define OPCODE_BRANCH 0x63
#define OPCODE_JALR 0x67
#define OPCODE_JAL 0x6f
#define OPCODE_SYSTEM 0x73
/* parts of opcode for RVC*/
#define OPCODE_C_0 0x0
#define OPCODE_C_1 0x1
#define OPCODE_C_2 0x2
/* parts of funct3 code for I, M, A extension*/
#define FUNCT3_JALR 0x0
#define FUNCT3_BEQ 0x0
#define FUNCT3_BNE 0x1000
#define FUNCT3_BLT 0x4000
#define FUNCT3_BGE 0x5000
#define FUNCT3_BLTU 0x6000
#define FUNCT3_BGEU 0x7000
/* parts of funct3 code for C extension*/
#define FUNCT3_C_BEQZ 0xc000
#define FUNCT3_C_BNEZ 0xe000
#define FUNCT3_C_J 0xa000
#define FUNCT3_C_JAL 0x2000
#define FUNCT4_C_JR 0x8000
#define FUNCT4_C_JALR 0xf000
#define FUNCT12_SRET 0x10200000
#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
#define MATCH_JAL (OPCODE_JAL)
#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
#define MASK_JALR 0x707f
#define MASK_JAL 0x7f
#define MASK_C_JALR 0xf07f
#define MASK_C_JR 0xf07f
#define MASK_C_JAL 0xe003
#define MASK_C_J 0xe003
#define MASK_BEQ 0x707f
#define MASK_BNE 0x707f
#define MASK_BLT 0x707f
#define MASK_BGE 0x707f
#define MASK_BLTU 0x707f
#define MASK_BGEU 0x707f
#define MASK_C_BEQZ 0xe003
#define MASK_C_BNEZ 0xe003
#define MASK_SRET 0xffffffff
#define __INSN_LENGTH_MASK _UL(0x3)
#define __INSN_LENGTH_GE_32 _UL(0x3)
#define __INSN_OPCODE_MASK _UL(0x7F)
#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
/* Define a series of is_XXX_insn functions to check if the value INSN
* is an instance of instruction XXX.
*/
#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \
static inline bool is_ ## INSN_NAME ## _insn(long insn) \
{ \
return (insn & (INSN_MASK)) == (INSN_MATCH); \
}
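
For instance, DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) (the form kgdb.c used
before this series) expands to:

    static inline bool is_jal_insn(long insn)
    {
        return (insn & (MASK_JAL)) == (MATCH_JAL);
    }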
#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
#define RVC_X(X, s, mask) RV_X(X, s, mask)
#define EXTRACT_JTYPE_IMM(x) \
({typeof(x) x_ = (x); \
(RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \
(RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \
(RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \
(RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); })
#define EXTRACT_ITYPE_IMM(x) \
({typeof(x) x_ = (x); \
(RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \
(RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); })
#define EXTRACT_BTYPE_IMM(x) \
({typeof(x) x_ = (x); \
(RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \
(RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \
(RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \
(RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); })
#define EXTRACT_RVC_J_IMM(x) \
({typeof(x) x_ = (x); \
(RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
(RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
(RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
(RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
(RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
(RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
(RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
(RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
#define EXTRACT_RVC_B_IMM(x) \
({typeof(x) x_ = (x); \
(RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
(RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
(RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
(RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
(RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
@@ -15,6 +15,8 @@
 #include <asm/vendorid_list.h>
 #include <asm/sbi.h>
 #include <asm/csr.h>
+#include <asm/insn.h>
+#include <asm/patch.h>
 
 struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
@@ -53,6 +55,60 @@ static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_inf
	}
 }
+static u32 riscv_instruction_at(void *p)
+{
+	u16 *parcel = p;
+
+	return (u32)parcel[0] | (u32)parcel[1] << 16;
+}
+
+static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
+					      u32 jalr_insn, int patch_offset)
+{
+	u32 call[2] = { auipc_insn, jalr_insn };
+	s32 imm;
+
+	/* get and adjust new target address */
+	imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn);
+	imm -= patch_offset;
+
+	/* update instructions */
+	riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);
+
+	/* patch the call place again */
+	patch_text_nosync(ptr, call, sizeof(u32) * 2);
+}
+
+void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
+				   int patch_offset)
+{
+	int num_insn = len / sizeof(u32);
+	int i;
+
+	for (i = 0; i < num_insn; i++) {
+		u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32));
+
+		/*
+		 * May be the start of an auipc + jalr pair
+		 * Needs to check that at least one more instruction
+		 * is in the list.
+		 */
+		if (riscv_insn_is_auipc(insn) && i < num_insn - 1) {
+			u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32));
+
+			if (!riscv_insn_is_jalr(insn2))
+				continue;
+
+			/* if instruction pair is a call, it will use the ra register */
+			if (RV_EXTRACT_RD_REG(insn) != 1)
+				continue;
+
+			riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32),
+							 insn, insn2, patch_offset);
+		}
+	}
+}
 
 /*
  * This is called very early in the boot process (directly after we run
  * a feature detect on the boot CPU). No need to worry about other CPUs
...
@@ -339,8 +339,11 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
 		}
 
 		tmp = (1U << alt->errata_id);
-		if (cpu_req_feature & tmp)
+		if (cpu_req_feature & tmp) {
 			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+			riscv_alternative_fix_offsets(alt->old_ptr, alt->alt_len,
+						      alt->old_ptr - alt->alt_ptr);
+		}
 	}
 }
 #endif
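
For reference on the arithmetic: the hunk above passes
patch_offset = alt->old_ptr - alt->alt_ptr, i.e. how far the instructions
moved when patch_text_nosync() copied them out of the alternatives section.
An auipc + jalr pair that was built to reach a target from alt_ptr then
needs its immediate reduced by exactly that distance, which is what
riscv_alternative_fix_auipc_jalr() does:

    new_imm = target - old_ptr
            = (target - alt_ptr) - (old_ptr - alt_ptr)
            = old_imm - patch_offset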
@@ -11,7 +11,7 @@
 #include <linux/string.h>
 #include <asm/cacheflush.h>
 #include <asm/gdb_xml.h>
-#include <asm/parse_asm.h>
+#include <asm/insn.h>
 
 enum {
	NOT_KGDB_BREAK = 0,
@@ -23,27 +23,6 @@ enum {
 static unsigned long stepped_address;
 static unsigned int stepped_opcode;
 
-#if __riscv_xlen == 32
-/* C.JAL is an RV32C-only instruction */
-DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
-#else
-#define is_c_jal_insn(opcode) 0
-#endif
-DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
-DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
-DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
-DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
-DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
-DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
-DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
-DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
-DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
-DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
-DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
-DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
-DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
-DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
-
 static int decode_register_index(unsigned long opcode, int offset)
 {
	return (opcode >> offset) & 0x1F;
@@ -65,23 +44,25 @@ static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
	if (get_kernel_nofault(op_code, (void *)pc))
		return -EINVAL;
	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
-		if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+		if (riscv_insn_is_c_jalr(op_code) ||
+		    riscv_insn_is_c_jr(op_code)) {
			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
			*next_addr = regs_ptr[rs1_num];
-		} else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
-			*next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
-		} else if (is_c_beqz_insn(op_code)) {
+		} else if (riscv_insn_is_c_j(op_code) ||
+			   riscv_insn_is_c_jal(op_code)) {
+			*next_addr = RVC_EXTRACT_JTYPE_IMM(op_code) + pc;
+		} else if (riscv_insn_is_c_beqz(op_code)) {
			rs1_num = decode_register_index_short(op_code,
							      RVC_C1_RS1_OPOFF);
			if (!rs1_num || regs_ptr[rs1_num] == 0)
-				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
-		} else if (is_c_bnez_insn(op_code)) {
+		} else if (riscv_insn_is_c_bnez(op_code)) {
			rs1_num =
			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
			if (rs1_num && regs_ptr[rs1_num] != 0)
-				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else {
@@ -90,7 +71,7 @@ static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
	} else {
		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
			bool result = false;
-			long imm = EXTRACT_BTYPE_IMM(op_code);
+			long imm = RV_EXTRACT_BTYPE_IMM(op_code);
			unsigned long rs1_val = 0, rs2_val = 0;
 
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
@@ -100,34 +81,34 @@ static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
			if (rs2_num)
				rs2_val = regs_ptr[rs2_num];
 
-			if (is_beq_insn(op_code))
+			if (riscv_insn_is_beq(op_code))
				result = (rs1_val == rs2_val) ? true : false;
-			else if (is_bne_insn(op_code))
+			else if (riscv_insn_is_bne(op_code))
				result = (rs1_val != rs2_val) ? true : false;
-			else if (is_blt_insn(op_code))
+			else if (riscv_insn_is_blt(op_code))
				result =
				    ((long)rs1_val <
				     (long)rs2_val) ? true : false;
-			else if (is_bge_insn(op_code))
+			else if (riscv_insn_is_bge(op_code))
				result =
				    ((long)rs1_val >=
				     (long)rs2_val) ? true : false;
-			else if (is_bltu_insn(op_code))
+			else if (riscv_insn_is_bltu(op_code))
				result = (rs1_val < rs2_val) ? true : false;
-			else if (is_bgeu_insn(op_code))
+			else if (riscv_insn_is_bgeu(op_code))
				result = (rs1_val >= rs2_val) ? true : false;
 
			if (result)
				*next_addr = imm + pc;
			else
				*next_addr = pc + 4;
-		} else if (is_jal_insn(op_code)) {
-			*next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
-		} else if (is_jalr_insn(op_code)) {
+		} else if (riscv_insn_is_jal(op_code)) {
+			*next_addr = RV_EXTRACT_JTYPE_IMM(op_code) + pc;
+		} else if (riscv_insn_is_jalr(op_code)) {
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			if (rs1_num)
				*next_addr = ((unsigned long *)regs)[rs1_num];
-			*next_addr += EXTRACT_ITYPE_IMM(op_code);
-		} else if (is_sret_insn(op_code)) {
+			*next_addr += RV_EXTRACT_ITYPE_IMM(op_code);
+		} else if (riscv_insn_is_sret(op_code)) {
			*next_addr = pc;
		} else {
			*next_addr = pc + 4;
...
@@ -136,13 +136,6 @@ bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *re
 #define branch_offset(opcode) \
	sign_extend32((branch_imm(opcode)), 12)
 
-#define BRANCH_BEQ	0x0
-#define BRANCH_BNE	0x1
-#define BRANCH_BLT	0x4
-#define BRANCH_BGE	0x5
-#define BRANCH_BLTU	0x6
-#define BRANCH_BGEU	0x7
-
 bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs)
 {
	/*
@@ -169,22 +162,22 @@ bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *r
	offset_tmp = branch_offset(opcode);
	switch (branch_funct3(opcode)) {
-	case BRANCH_BEQ:
+	case RVG_FUNCT3_BEQ:
		offset = (rs1_val == rs2_val) ? offset_tmp : 4;
		break;
-	case BRANCH_BNE:
+	case RVG_FUNCT3_BNE:
		offset = (rs1_val != rs2_val) ? offset_tmp : 4;
		break;
-	case BRANCH_BLT:
+	case RVG_FUNCT3_BLT:
		offset = ((long)rs1_val < (long)rs2_val) ? offset_tmp : 4;
		break;
-	case BRANCH_BGE:
+	case RVG_FUNCT3_BGE:
		offset = ((long)rs1_val >= (long)rs2_val) ? offset_tmp : 4;
		break;
-	case BRANCH_BLTU:
+	case RVG_FUNCT3_BLTU:
		offset = (rs1_val < rs2_val) ? offset_tmp : 4;
		break;
-	case BRANCH_BGEU:
+	case RVG_FUNCT3_BGEU:
		offset = (rs1_val >= rs2_val) ? offset_tmp : 4;
		break;
	default:
...
@@ -3,14 +3,7 @@
 #ifndef _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
 #define _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
 
-#define __RISCV_INSN_FUNCS(name, mask, val) \
-static __always_inline bool riscv_insn_is_##name(probe_opcode_t code) \
-{ \
-	BUILD_BUG_ON(~(mask) & (val)); \
-	return (code & (mask)) == (val); \
-} \
-bool simulate_##name(u32 opcode, unsigned long addr, \
-		     struct pt_regs *regs)
+#include <asm/insn.h>
 
 #define RISCV_INSN_REJECTED(name, code) \
	do { \
@@ -30,18 +23,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
		} \
	} while (0)
 
-__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
-__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
-__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
-__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
-__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
-__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
-__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);
-__RISCV_INSN_FUNCS(auipc, 0x7f, 0x17);
-__RISCV_INSN_FUNCS(branch, 0x7f, 0x63);
-__RISCV_INSN_FUNCS(jal, 0x7f, 0x6f);
-__RISCV_INSN_FUNCS(jalr, 0x707f, 0x67);
+bool simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs);
 
 #endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */