Commit 51781ce8 authored by Samuel Holland, committed by Palmer Dabbelt

riscv: Pass patch_text() the length in bytes

patch_text_nosync() already handles an arbitrary length of code, so this
removes a superfluous loop and reduces the number of icache flushes.
Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20240327160520.791322-6-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 5080ca0f
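
The byte-length interface matters because RISC-V instructions are not all the same size once the compressed (RVC) extension is in play. Below is a standalone sketch (ordinary userspace C, not kernel code) of the width rule as I understand the kernel's GET_INSN_LENGTH() macro to apply it: an instruction whose two low bits are both set is 32 bits wide, anything else is a 16-bit compressed instruction. The instruction values are only illustrative; the point is that a byte count, unlike an instruction count, tells the patching code exactly how much text to write and flush in one go.

/*
 * Standalone sketch, not kernel code: why a byte length is the natural unit
 * once compressed (RVC) instructions are in the mix.  Assumption: an
 * instruction whose two low bits are both set is 32 bits wide, anything else
 * is a 16-bit compressed instruction, the same 2-vs-4-byte distinction the
 * kernel's GET_INSN_LENGTH() macro makes.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t insn_length(uint32_t insn)
{
	return (insn & 0x3) == 0x3 ? 4 : 2;
}

int main(void)
{
	/* Illustrative mix: c.nop (16-bit), ebreak (32-bit), c.ebreak (16-bit). */
	uint32_t insns[] = { 0x0001, 0x00100073, 0x9002 };
	size_t ninsns = sizeof(insns) / sizeof(insns[0]);
	size_t len = 0;

	for (size_t i = 0; i < ninsns; i++)
		len += insn_length(insns[i]);

	/* Three instructions, but only 8 bytes of text to write and flush. */
	printf("ninsns = %zu, len = %zu bytes\n", ninsns, len);
	return 0;
}
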
@@ -9,7 +9,7 @@
 int patch_insn_write(void *addr, const void *insn, size_t len);
 int patch_text_nosync(void *addr, const void *insns, size_t len);
 int patch_text_set_nosync(void *addr, u8 c, size_t len);
-int patch_text(void *addr, u32 *insns, int ninsns);
+int patch_text(void *addr, u32 *insns, size_t len);
 extern int riscv_patch_in_stop_machine;
@@ -19,7 +19,7 @@
 struct patch_insn {
 	void *addr;
 	u32 *insns;
-	int ninsns;
+	size_t len;
 	atomic_t cpu_count;
 };
@@ -239,14 +239,10 @@ NOKPROBE_SYMBOL(patch_text_nosync);
 static int patch_text_cb(void *data)
 {
 	struct patch_insn *patch = data;
-	unsigned long len;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
-		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
-			len = GET_INSN_LENGTH(patch->insns[i]);
-			ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
-		}
+		ret = patch_insn_write(patch->addr, patch->insns, patch->len);
 		/*
 		 * Make sure the patching store is effective *before* we
 		 * increment the counter which releases all waiting CPUs
@@ -266,13 +262,13 @@ static int patch_text_cb(void *data)
 }
 NOKPROBE_SYMBOL(patch_text_cb);
 
-int patch_text(void *addr, u32 *insns, int ninsns)
+int patch_text(void *addr, u32 *insns, size_t len)
 {
 	int ret;
 	struct patch_insn patch = {
 		.addr = addr,
 		.insns = insns,
-		.ninsns = ninsns,
+		.len = len,
 		.cpu_count = ATOMIC_INIT(0),
 	};
@@ -24,13 +24,13 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
 
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
+	size_t len = GET_INSN_LENGTH(p->opcode);
 	u32 insn = __BUG_INSN_32;
-	unsigned long offset = GET_INSN_LENGTH(p->opcode);
 
-	p->ainsn.api.restore = (unsigned long)p->addr + offset;
+	p->ainsn.api.restore = (unsigned long)p->addr + len;
 
-	patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
-	patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
+	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
+	patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
 }
 
 static void __kprobes arch_prepare_simulate(struct kprobe *p)
@@ -107,16 +107,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 /* install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
-		   __BUG_INSN_32 : __BUG_INSN_16;
+	size_t len = GET_INSN_LENGTH(p->opcode);
+	u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;
 
-	patch_text(p->addr, &insn, 1);
+	patch_text(p->addr, &insn, len);
 }
 
 /* remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, &p->opcode, 1);
+	size_t len = GET_INSN_LENGTH(p->opcode);
+
+	patch_text(p->addr, &p->opcode, len);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
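
For context on the kprobes hunks above, here is a standalone model (not kernel code) of what the arming path now does: the breakpoint that overwrites the probed instruction must be the same width as that instruction, so the byte length handed to patch_text() is derived from the saved opcode. The ebreak/c.ebreak encodings and the width rule are assumptions mirroring __BUG_INSN_32, __BUG_INSN_16 and GET_INSN_LENGTH(); the probed opcodes are made up for illustration.

/*
 * Standalone sketch, not kernel code, of the kprobe arming logic: pick a
 * breakpoint that is exactly as wide as the instruction being replaced and
 * report how many bytes would be patched.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BUG_INSN_32 0x00100073U	/* ebreak (assumed encoding) */
#define BUG_INSN_16 0x9002U	/* c.ebreak (assumed encoding) */

static size_t insn_length(uint32_t insn)
{
	return (insn & 0x3) == 0x3 ? 4 : 2;
}

/* Models arch_arm_kprobe(): breakpoint width follows the original opcode. */
static void arm_probe(uint32_t opcode)
{
	size_t len = insn_length(opcode);
	uint32_t bkpt = (len == 4) ? BUG_INSN_32 : BUG_INSN_16;

	printf("opcode 0x%08x -> patch %zu bytes with 0x%08x\n",
	       (unsigned int)opcode, len, (unsigned int)bkpt);
}

int main(void)
{
	arm_probe(0x00008067);	/* ret (jalr x0, 0(ra)): 4-byte instruction */
	arm_probe(0x8082);	/* c.ret: 2-byte compressed instruction */
	return 0;
}
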
@@ -16,6 +16,7 @@
 #include "bpf_jit.h"
 
 #define RV_FENTRY_NINSNS 2
+#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)
 
 #define RV_REG_TCC RV_REG_A6
 #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -672,7 +673,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	if (ret)
 		return ret;
 
-	if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4))
+	if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
 		return -EFAULT;
 
 	ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
@@ -681,8 +682,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
-	if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4))
-		ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS);
+	if (memcmp(ip, new_insns, RV_FENTRY_NBYTES))
+		ret = patch_text(ip, new_insns, RV_FENTRY_NBYTES);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
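
The BPF JIT change is simpler because, as I understand the code the JIT emits at the fentry site, it is always RV_FENTRY_NINSNS full-width 4-byte instructions, so the byte length is just the instruction count times four, which is what the new RV_FENTRY_NBYTES macro spells out. A trivial standalone check of that arithmetic:

/* Standalone check, assuming 2 x 4-byte instructions at the patch site. */
#include <stdio.h>

#define RV_FENTRY_NINSNS 2
#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)

int main(void)
{
	printf("fentry patch site: %d insns, %d bytes\n",
	       RV_FENTRY_NINSNS, RV_FENTRY_NBYTES);
	return 0;
}
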