Commit c42421e2 authored by Masami Hiramatsu, committed by Steven Rostedt (VMware)

kprobes: treewide: Use 'kprobe_opcode_t *' for the code address in get_optimized_kprobe()

Since get_optimized_kprobe() is only used inside kprobes,
it does not need an 'unsigned long' type for its 'addr' parameter.
Make it use 'kprobe_opcode_t *' for 'addr', and convert the
subsequent call to arch_within_optimized_kprobe() to take
'kprobe_opcode_t *' as well.

Note that MAX_OPTIMIZED_LENGTH and RELATIVEJUMP_SIZE are defined
in bytes, whereas the size of 'kprobe_opcode_t' depends on the
architecture. Therefore, pointer arithmetic involving those macros
must scale them by sizeof(kprobe_opcode_t); a standalone sketch
below illustrates this.

Link: https://lkml.kernel.org/r/163163040680.489837.12133032364499833736.stgit@devnote2
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 57d4e317
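
The scaling issue described above can be demonstrated outside the kernel.
The following is a minimal user-space sketch, not kernel code: the typedef
and the RELATIVEJUMP_SIZE value (8 bytes) are illustrative stand-ins. It
shows that adding a byte-sized macro to a 'kprobe_opcode_t *' requires
dividing by sizeof(kprobe_opcode_t) first, exactly as the hunks below do.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel types/macros; values are illustrative only.
 * On arm, kprobe_opcode_t is a 32-bit instruction word, so a byte-sized
 * macro such as RELATIVEJUMP_SIZE must be scaled before being added to
 * a kprobe_opcode_t pointer.
 */
typedef uint32_t kprobe_opcode_t;
#define RELATIVEJUMP_SIZE 8	/* bytes; hypothetical value */

int main(void)
{
	kprobe_opcode_t insns[4] = { 0 };
	kprobe_opcode_t *kp_addr = insns;

	/* Byte arithmetic: the end of the replaced region. */
	char *end_bytes = (char *)kp_addr + RELATIVEJUMP_SIZE;

	/* Element arithmetic: the same end address, in opcode units. */
	kprobe_opcode_t *end_ops =
		kp_addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t));

	/* Both ways of computing the end of the region agree. */
	printf("same end address: %d\n", (char *)end_ops == end_bytes);
	return 0;
}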
...@@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist, ...@@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
} }
int arch_within_optimized_kprobe(struct optimized_kprobe *op, int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr) kprobe_opcode_t *addr)
{ {
return ((unsigned long)op->kp.addr <= addr && return (op->kp.addr <= addr &&
(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
} }
void arch_remove_optimized_kprobe(struct optimized_kprobe *op) void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
......
...@@ -301,8 +301,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_li ...@@ -301,8 +301,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_li
} }
} }
int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{ {
return ((unsigned long)op->kp.addr <= addr && return (op->kp.addr <= addr &&
(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr); op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
} }
...@@ -367,10 +367,10 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) ...@@ -367,10 +367,10 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
/* Check the addr is within the optimized instructions. */ /* Check the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op, int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr) kprobe_opcode_t *addr)
{ {
return ((unsigned long)op->kp.addr <= addr && return (op->kp.addr <= addr &&
(unsigned long)op->kp.addr + op->optinsn.size > addr); op->kp.addr + op->optinsn.size > addr);
} }
/* Free optimized instruction slot */ /* Free optimized instruction slot */
......
...@@ -329,7 +329,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, ...@@ -329,7 +329,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list); struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op); extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op, extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr); kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs); extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
......
...@@ -485,15 +485,15 @@ static int kprobe_queued(struct kprobe *p) ...@@ -485,15 +485,15 @@ static int kprobe_queued(struct kprobe *p)
* Return an optimized kprobe whose optimizing code replaces * Return an optimized kprobe whose optimizing code replaces
* instructions including 'addr' (exclude breakpoint). * instructions including 'addr' (exclude breakpoint).
*/ */
static struct kprobe *get_optimized_kprobe(unsigned long addr) static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{ {
int i; int i;
struct kprobe *p = NULL; struct kprobe *p = NULL;
struct optimized_kprobe *op; struct optimized_kprobe *op;
/* Don't check i == 0, since that is a breakpoint case. */ /* Don't check i == 0, since that is a breakpoint case. */
for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
p = get_kprobe((void *)(addr - i)); p = get_kprobe(addr - i);
if (p && kprobe_optready(p)) { if (p && kprobe_optready(p)) {
op = container_of(p, struct optimized_kprobe, kp); op = container_of(p, struct optimized_kprobe, kp);
...@@ -967,7 +967,7 @@ static void __arm_kprobe(struct kprobe *p) ...@@ -967,7 +967,7 @@ static void __arm_kprobe(struct kprobe *p)
lockdep_assert_held(&text_mutex); lockdep_assert_held(&text_mutex);
/* Find the overlapping optimized kprobes. */ /* Find the overlapping optimized kprobes. */
_p = get_optimized_kprobe((unsigned long)p->addr); _p = get_optimized_kprobe(p->addr);
if (unlikely(_p)) if (unlikely(_p))
/* Fallback to unoptimized kprobe */ /* Fallback to unoptimized kprobe */
unoptimize_kprobe(_p, true); unoptimize_kprobe(_p, true);
...@@ -989,7 +989,7 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt) ...@@ -989,7 +989,7 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
if (!kprobe_queued(p)) { if (!kprobe_queued(p)) {
arch_disarm_kprobe(p); arch_disarm_kprobe(p);
/* If another kprobe was blocked, re-optimize it. */ /* If another kprobe was blocked, re-optimize it. */
_p = get_optimized_kprobe((unsigned long)p->addr); _p = get_optimized_kprobe(p->addr);
if (unlikely(_p) && reopt) if (unlikely(_p) && reopt)
optimize_kprobe(_p); optimize_kprobe(_p);
} }
......
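
For the same reason, the loop bound in get_optimized_kprobe() above is
divided by sizeof(kprobe_opcode_t): 'addr - i' now steps back one opcode
slot per iteration rather than one byte. Below is a user-space sketch of
that backward scan; every name and value in it (find_covering_probe(),
get_kprobe_stub(), the macro values) is a hypothetical stand-in, not a
kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t kprobe_opcode_t;	/* 4-byte opcodes, as on arm */
#define MAX_OPTIMIZED_LENGTH 16		/* bytes; hypothetical value */
#define RELATIVEJUMP_SIZE 8		/* bytes; hypothetical value */

/* Toy probe table: one "registered" probe at a fixed address. */
static kprobe_opcode_t text[8];
static kprobe_opcode_t *registered = &text[2];

/* Stand-in for get_kprobe(): returns the address if a probe is there. */
static kprobe_opcode_t *get_kprobe_stub(kprobe_opcode_t *addr)
{
	return addr == registered ? addr : NULL;
}

/* Mirrors the converted loop: step back one opcode slot at a time,
 * covering at most MAX_OPTIMIZED_LENGTH bytes, looking for a probe whose
 * optimized region (RELATIVEJUMP_SIZE bytes) includes 'addr'. */
static kprobe_opcode_t *find_covering_probe(kprobe_opcode_t *addr)
{
	kprobe_opcode_t *p = NULL;
	size_t i;

	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
		p = get_kprobe_stub(addr - i);

	/* Same range check as arch_within_optimized_kprobe(), scaled. */
	if (p && p <= addr &&
	    p + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr)
		return p;
	return NULL;
}

int main(void)
{
	/* &text[3] lies inside the probe's 8-byte jump at &text[2]. */
	printf("covered: %d\n", find_covering_probe(&text[3]) == registered);
	/* &text[5] is past the replaced region. */
	printf("covered: %d\n", find_covering_probe(&text[5]) == registered);
	return 0;
}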