Commit c2249707 authored by Pratyush Anand, committed by Catalin Marinas

arm64: kprobe: protect/rename few definitions to be reused by uprobe

The decode-insn code has to be reused by the arm64 uprobe implementation as well.
Therefore, this patch protects some portions of the kprobe code and renames a few
others, so that the decode-insn functionality can be reused by uprobe even when
CONFIG_KPROBES is not defined.

kprobe_opcode_t and struct arch_specific_insn are also defined by
linux/kprobes.h when CONFIG_KPROBES is not defined, so protect these
definitions in asm/probes.h.

linux/kprobes.h already includes asm/kprobes.h. Therefore, remove inclusion
of asm/kprobes.h from decode-insn.c.

Some definitions, such as kprobe_insn and kprobes_handler_t, can be reused by
uprobe, so it is better to drop the 'k' from their names.

struct arch_specific_insn is specific to kprobe. Therefore, introduce a new
struct arch_probe_insn which will be common to both kprobe and uprobe, so
that the decode-insn code can be shared. Modify the kprobe code accordingly.

The function arm_probe_decode_insn() will be needed by uprobe as well, so make
it global.
Signed-off-by: Pratyush Anand <panand@redhat.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 1404d6f1
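For context, here is a minimal, illustrative sketch of how a uprobe-side caller could consume the now-global arm_probe_decode_insn() through the shared struct arch_probe_insn. The struct arch_uprobe layout and the analyze_uprobe_insn() name below are assumptions about the follow-up uprobe work, not something introduced by this commit:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/probes.h>          /* struct arch_probe_insn, probe_opcode_t */
#include "decode-insn.h"         /* enum probe_insn, arm_probe_decode_insn() */

/* Hypothetical uprobe-side state embedding the shared probe description. */
struct arch_uprobe {
        struct arch_probe_insn api;     /* shared with kprobes */
        bool simulate;
};

static int analyze_uprobe_insn(struct arch_uprobe *auprobe, probe_opcode_t insn)
{
        /* Decode without any kprobe-specific state; works with CONFIG_KPROBES=n. */
        switch (arm_probe_decode_insn(insn, &auprobe->api)) {
        case INSN_REJECTED:
                return -EINVAL;
        case INSN_GOOD_NO_SLOT:
                auprobe->simulate = true;       /* simulate via api.handler */
                break;
        case INSN_GOOD:
                auprobe->simulate = false;      /* step the insn out of line */
                break;
        }
        return 0;
}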
arch/arm64/include/asm/probes.h
@@ -17,19 +17,22 @@
 #include <asm/opcodes.h>
 
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
 
 /* architecture specific copy of original instruction */
-struct arch_specific_insn {
-        kprobe_opcode_t *insn;
+struct arch_probe_insn {
+        probe_opcode_t *insn;
         pstate_check_t *pstate_cc;
-        kprobes_handler_t *handler;
+        probes_handler_t *handler;
         /* restore address after step xol */
         unsigned long restore;
 };
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+        struct arch_probe_insn api;
+};
+#endif
 
 #endif
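Once an instruction has been classified as INSN_GOOD_NO_SLOT, the simulation handler stored in the shared struct is simply invoked with the original opcode, address and registers. This mirrors what arch_simulate_insn() does in the kprobes diff below; the helper name here is illustrative only:

#include <asm/probes.h>         /* struct arch_probe_insn, probes_handler_t */
#include <asm/ptrace.h>         /* struct pt_regs */

/* Illustrative sketch: run the simulation handler set by arm_probe_decode_insn(). */
static void simulate_probed_insn(struct arch_probe_insn *api, u32 opcode,
                                 unsigned long addr, struct pt_regs *regs)
{
        if (api->handler)
                api->handler(opcode, (long)addr, regs);
}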
arch/arm64/kernel/probes/decode-insn.c
@@ -78,8 +78,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
  * INSN_GOOD If instruction is supported and uses instruction slot,
  * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
  */
-static enum kprobe_insn __kprobes
-arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
 {
         /*
          * Instructions reading or modifying the PC won't work from the XOL
@@ -89,26 +89,26 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                 return INSN_GOOD;
 
         if (aarch64_insn_is_bcond(insn)) {
-                asi->handler = simulate_b_cond;
+                api->handler = simulate_b_cond;
         } else if (aarch64_insn_is_cbz(insn) ||
                    aarch64_insn_is_cbnz(insn)) {
-                asi->handler = simulate_cbz_cbnz;
+                api->handler = simulate_cbz_cbnz;
         } else if (aarch64_insn_is_tbz(insn) ||
                    aarch64_insn_is_tbnz(insn)) {
-                asi->handler = simulate_tbz_tbnz;
+                api->handler = simulate_tbz_tbnz;
         } else if (aarch64_insn_is_adr_adrp(insn)) {
-                asi->handler = simulate_adr_adrp;
+                api->handler = simulate_adr_adrp;
         } else if (aarch64_insn_is_b(insn) ||
                    aarch64_insn_is_bl(insn)) {
-                asi->handler = simulate_b_bl;
+                api->handler = simulate_b_bl;
         } else if (aarch64_insn_is_br(insn) ||
                    aarch64_insn_is_blr(insn) ||
                    aarch64_insn_is_ret(insn)) {
-                asi->handler = simulate_br_blr_ret;
+                api->handler = simulate_br_blr_ret;
         } else if (aarch64_insn_is_ldr_lit(insn)) {
-                asi->handler = simulate_ldr_literal;
+                api->handler = simulate_ldr_literal;
         } else if (aarch64_insn_is_ldrsw_lit(insn)) {
-                asi->handler = simulate_ldrsw_literal;
+                api->handler = simulate_ldrsw_literal;
         } else {
                 /*
                  * Instruction cannot be stepped out-of-line and we don't
@@ -120,6 +120,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         return INSN_GOOD_NO_SLOT;
 }
 
+#ifdef CONFIG_KPROBES
 static bool __kprobes
 is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 {
@@ -138,12 +139,12 @@ is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
         return false;
 }
 
-enum kprobe_insn __kprobes
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 {
-        enum kprobe_insn decoded;
-        kprobe_opcode_t insn = le32_to_cpu(*addr);
-        kprobe_opcode_t *scan_end = NULL;
+        enum probe_insn decoded;
+        probe_opcode_t insn = le32_to_cpu(*addr);
+        probe_opcode_t *scan_end = NULL;
         unsigned long size = 0, offset = 0;
 
         /*
@@ -162,7 +163,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
                 else
                         scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
         }
-        decoded = arm_probe_decode_insn(insn, asi);
+        decoded = arm_probe_decode_insn(insn, &asi->api);
 
         if (decoded != INSN_REJECTED && scan_end)
                 if (is_probed_address_atomic(addr - 1, scan_end))
@@ -170,3 +171,4 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 
         return decoded;
 }
+#endif
arch/arm64/kernel/probes/decode-insn.h
@@ -23,13 +23,17 @@
  */
 #define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t))
 
-enum kprobe_insn {
+enum probe_insn {
         INSN_REJECTED,
         INSN_GOOD_NO_SLOT,
         INSN_GOOD,
 };
 
-enum kprobe_insn __kprobes
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
 
 #endif /* _ARM_KERNEL_KPROBES_ARM64_H */
arch/arm64/kernel/probes/kprobes.c
@@ -44,31 +44,31 @@ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
         /* prepare insn slot */
-        p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+        p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
 
-        flush_icache_range((uintptr_t) (p->ainsn.insn),
-                           (uintptr_t) (p->ainsn.insn) +
+        flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+                           (uintptr_t) (p->ainsn.api.insn) +
                            MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 
         /*
          * Needs restoring of return address after stepping xol.
          */
-        p->ainsn.restore = (unsigned long) p->addr +
+        p->ainsn.api.restore = (unsigned long) p->addr +
           sizeof(kprobe_opcode_t);
 }
 
 static void __kprobes arch_prepare_simulate(struct kprobe *p)
 {
         /* This instructions is not executed xol. No need to adjust the PC */
-        p->ainsn.restore = 0;
+        p->ainsn.api.restore = 0;
 }
 
 static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 {
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-        if (p->ainsn.handler)
-                p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+        if (p->ainsn.api.handler)
+                p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
 
         /* single step simulated, now go for post processing */
         post_kprobe_handler(kcb, regs);
@@ -98,18 +98,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
                 return -EINVAL;
 
         case INSN_GOOD_NO_SLOT: /* insn need simulation */
-                p->ainsn.insn = NULL;
+                p->ainsn.api.insn = NULL;
                 break;
 
         case INSN_GOOD: /* instruction uses slot */
-                p->ainsn.insn = get_insn_slot();
-                if (!p->ainsn.insn)
+                p->ainsn.api.insn = get_insn_slot();
+                if (!p->ainsn.api.insn)
                         return -ENOMEM;
                 break;
         };
 
         /* prepare the instruction */
-        if (p->ainsn.insn)
+        if (p->ainsn.api.insn)
                 arch_prepare_ss_slot(p);
         else
                 arch_prepare_simulate(p);
@@ -142,9 +142,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-        if (p->ainsn.insn) {
-                free_insn_slot(p->ainsn.insn, 0);
-                p->ainsn.insn = NULL;
+        if (p->ainsn.api.insn) {
+                free_insn_slot(p->ainsn.api.insn, 0);
+                p->ainsn.api.insn = NULL;
         }
 }
 
@@ -244,9 +244,9 @@ static void __kprobes setup_singlestep(struct kprobe *p,
         }
 
-        if (p->ainsn.insn) {
+        if (p->ainsn.api.insn) {
                 /* prepare for single stepping */
-                slot = (unsigned long)p->ainsn.insn;
+                slot = (unsigned long)p->ainsn.api.insn;
 
                 set_ss_context(kcb, slot);      /* mark pending ss */
@@ -295,8 +295,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
                 return;
 
         /* return addr restore if non-branching insn */
-        if (cur->ainsn.restore != 0)
-                instruction_pointer_set(regs, cur->ainsn.restore);
+        if (cur->ainsn.api.restore != 0)
+                instruction_pointer_set(regs, cur->ainsn.api.restore);
 
         /* restore back original saved kprobe variables and continue */
         if (kcb->kprobe_status == KPROBE_REENTER) {