Commit f38adf86 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/optprobes: Compact the source code a bit.

Now that lines can be up to 100 characters long, minimise the
number of split lines to increase readability.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8ebbd977ea8cf8d706d82458f2a21acd44562a99.1621516826.git.christophe.leroy@csgroup.eu
parent afd3287c
@@ -18,18 +18,12 @@
 #include <asm/ppc-opcode.h>
 #include <asm/inst.h>
 
-#define TMPL_CALL_HDLR_IDX	\
-	(optprobe_template_call_handler - optprobe_template_entry)
-#define TMPL_EMULATE_IDX	\
-	(optprobe_template_call_emulate - optprobe_template_entry)
-#define TMPL_RET_IDX		\
-	(optprobe_template_ret - optprobe_template_entry)
-#define TMPL_OP_IDX		\
-	(optprobe_template_op_address - optprobe_template_entry)
-#define TMPL_INSN_IDX		\
-	(optprobe_template_insn - optprobe_template_entry)
-#define TMPL_END_IDX		\
-	(optprobe_template_end - optprobe_template_entry)
+#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
+#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
+#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
+#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
+#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
+#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)
 
 static bool insn_page_in_use;
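For readers unfamiliar with the pattern: each TMPL_*_IDX macro turns an assembly label inside the optprobe template into an index relative to optprobe_template_entry, so the C code can patch the copied template at the right slot (e.g. buff + TMPL_RET_IDX further down). A minimal user-space sketch of that pointer-difference idiom, with a plain array and made-up label positions standing in for the real assembly symbols:

	/* Stand-alone sketch of the pointer-difference pattern behind the
	 * TMPL_*_IDX macros; buffer size and label positions are made up. */
	#include <stddef.h>
	#include <stdio.h>

	typedef unsigned int kprobe_opcode_t;

	static kprobe_opcode_t template_buf[16];

	/* Stand-ins for optprobe_template_entry / optprobe_template_ret. */
	static kprobe_opcode_t *const template_entry = &template_buf[0];
	static kprobe_opcode_t *const template_ret   = &template_buf[5];

	/* Same shape as TMPL_RET_IDX: pointer subtraction yields an index
	 * in kprobe_opcode_t (4-byte) units, not in bytes. */
	#define RET_IDX (template_ret - template_entry)

	int main(void)
	{
		kprobe_opcode_t *buff = template_buf;	/* copy of the template */

		/* The kernel patches a branch at buff + TMPL_RET_IDX; here we
		 * just store a dummy opcode at the computed slot. */
		buff[RET_IDX] = 0x48000000;
		printf("RET_IDX = %td\n", (ptrdiff_t)RET_IDX);
		return 0;
	}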
@@ -267,8 +261,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	 */
 	patch_branch(buff + TMPL_RET_IDX, nip, 0);
 
-	flush_icache_range((unsigned long)buff,
-			   (unsigned long)(&buff[TMPL_END_IDX]));
+	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
 
 	op->optinsn.insn = buff;
@@ -306,10 +299,8 @@ void arch_optimize_kprobes(struct list_head *oplist)
 		 * Backup instructions which will be replaced
 		 * by jump address
 		 */
-		memcpy(op->optinsn.copied_insn, op->kp.addr,
-		       RELATIVEJUMP_SIZE);
-		create_branch(&instr, op->kp.addr,
-			      (unsigned long)op->optinsn.insn, 0);
+		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
+		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
 		patch_instruction(op->kp.addr, instr);
 		list_del_init(&op->list);
 	}
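With the calls joined, the optimize sequence reads as three clear steps: back up the instruction at the probe address, build a branch to the out-of-line buffer, and patch it in. A rough user-space sketch of that shape (the struct name and helper are illustrative; RELATIVEJUMP_SIZE is one 4-byte powerpc instruction, and plain memcpy() stands in for the kernel's create_branch()/patch_instruction()):

	/* Sketch of the optimize step's shape: save the original
	 * instruction, then overwrite it with a branch instruction. */
	#include <stdint.h>
	#include <string.h>

	#define RELATIVEJUMP_SIZE 4	/* one 32-bit powerpc instruction */

	struct optinsn_sketch {
		uint8_t copied_insn[RELATIVEJUMP_SIZE];	/* backup of kp.addr */
	};

	static void optimize_sketch(struct optinsn_sketch *oi, uint8_t *kp_addr,
				    uint32_t branch_insn)
	{
		/* Backup instructions which will be replaced by the jump. */
		memcpy(oi->copied_insn, kp_addr, RELATIVEJUMP_SIZE);
		/* patch_instruction() writes this atomically in the kernel. */
		memcpy(kp_addr, &branch_insn, sizeof(branch_insn));
	}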
@@ -320,8 +311,7 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 	arch_arm_kprobe(&op->kp);
 }
 
-void arch_unoptimize_kprobes(struct list_head *oplist,
-			     struct list_head *done_list)
+void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
 {
 	struct optimized_kprobe *op;
 	struct optimized_kprobe *tmp;
@@ -332,8 +322,7 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
 	}
 }
 
-int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-				 unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
 {
 	return ((unsigned long)op->kp.addr <= addr &&
 		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
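The single-line prototype also makes the body's half-open range check easier to scan: addr counts as inside the optimized probe iff kp.addr <= addr < kp.addr + RELATIVEJUMP_SIZE. A quick stand-alone check of that predicate at the interval's edges (the addresses are arbitrary examples):

	/* Stand-alone check of the containment predicate used by
	 * arch_within_optimized_kprobe(). */
	#include <stdio.h>

	#define RELATIVEJUMP_SIZE 4	/* one 32-bit powerpc instruction */

	static int within(unsigned long kp_addr, unsigned long addr)
	{
		return kp_addr <= addr && kp_addr + RELATIVEJUMP_SIZE > addr;
	}

	int main(void)
	{
		printf("%d\n", within(0x1000, 0x1000));	/* 1: first byte of the jump */
		printf("%d\n", within(0x1000, 0x1003));	/* 1: last byte of the jump */
		printf("%d\n", within(0x1000, 0x1004));	/* 0: just past the jump */
		return 0;
	}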