Commit 9cdc3b6a authored by Youling Tang, committed by Huacai Chen

LoongArch: ftrace: Add direct call support

Select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS to provide the
register_ftrace_direct[_multi] interfaces, which allow users to register
a custom trampoline (direct_caller) as the mcount for one or more target
functions. The modify_ftrace_direct[_multi] interfaces are also provided
for modifying direct_caller.
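
For reference, a typical user of these interfaces looks roughly like the
kernel's samples/ftrace/ftrace-direct*.c. The sketch below is illustrative
only: it assumes the ops-based register_ftrace_direct(ops, addr) form of the
API (exact signatures differ between kernel versions) and a hypothetical
my_tramp trampoline implemented elsewhere in per-architecture assembly.

  #include <linux/ftrace.h>
  #include <linux/module.h>
  #include <linux/sched.h>

  extern void my_tramp(void);   /* hypothetical custom trampoline (asm) */

  static struct ftrace_ops direct_ops;

  static int __init direct_example_init(void)
  {
          /* Attach to one target function, e.g. wake_up_process() */
          int ret = ftrace_set_filter_ip(&direct_ops,
                                         (unsigned long)wake_up_process, 0, 0);
          if (ret)
                  return ret;

          /* ops->direct_call will then point at my_tramp */
          return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
  }

  static void __exit direct_example_exit(void)
  {
          unregister_ftrace_direct(&direct_ops, (unsigned long)my_tramp, true);
  }

  module_init(direct_example_init);
  module_exit(direct_example_exit);
  MODULE_LICENSE("GPL");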

There are a few cases to distinguish:
- If a direct call ops is the only one tracing a function AND the direct
  called trampoline is within the reach of a 'bl' instruction
  -> the ftrace patchsite jumps to the trampoline
- Else
  -> the ftrace patchsite jumps to the ftrace_regs_caller trampoline, which
     points to ftrace_list_ops so that it iterates over all registered
     ftrace ops, including the direct call ops, and calls its
     call_direct_funcs handler (sketched below); that handler stores the
     direct called trampoline's address in the ftrace_regs, and the
     ftrace_regs_caller trampoline then returns to that address instead of
     returning to the traced function
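
For reference, the generic handler mentioned above works roughly as follows
(a sketch modeled on call_direct_funcs() in kernel/trace/ftrace.c, shown
only to illustrate where arch_ftrace_set_direct_caller() fits in; it is not
part of this patch):

  static void call_direct_funcs(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *ops,
                                struct ftrace_regs *fregs)
  {
          unsigned long addr = READ_ONCE(ops->direct_call);

          if (!addr)
                  return;

          /* On LoongArch this stores addr into regs[13] (t1), see below */
          arch_ftrace_set_direct_caller(fregs, addr);
  }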
Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Youling Tang <tangyouling@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 24d4f527
@@ -93,6 +93,7 @@ config LOONGARCH
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EBPF_JIT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN

@@ -82,6 +82,18 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
 #define ftrace_graph_func ftrace_graph_func
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void
+__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+	regs->regs[13] = addr;	/* t1 */
+}
+#define arch_ftrace_set_direct_caller(fregs, addr) \
+	__arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 #endif
 #endif /* __ASSEMBLY__ */

@@ -65,6 +65,14 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod
 	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
 	struct plt_entry *plt;
 
+	/*
+	 * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
+	 * trampoline which knows how to indirectly reach that trampoline through
+	 * ops->direct_call.
+	 */
+	if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
+		*addr = FTRACE_REGS_ADDR;
+
 	/*
 	 * When the target is within range of the 'bl' instruction, use 'addr'
 	 * as-is and branch to that directly.

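The reachability test used in the hunk above already exists in this file;
for context, it is roughly the following (sketch of reachable_by_bl() in
arch/loongarch/kernel/ftrace_dyn.c; 'bl' encodes a 26-bit word offset, i.e.
a +/-128M byte range from the patchsite):

  static bool reachable_by_bl(unsigned long addr, unsigned long pc)
  {
          long offset = (long)addr - (long)pc;

          return offset >= -SZ_128M && offset < SZ_128M;
  }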
@@ -42,7 +42,6 @@
 	.if \allregs
 	PTR_S	tp, sp, PT_R2
 	PTR_S	t0, sp, PT_R12
-	PTR_S	t1, sp, PT_R13
 	PTR_S	t2, sp, PT_R14
 	PTR_S	t3, sp, PT_R15
 	PTR_S	t4, sp, PT_R16

@@ -64,6 +63,8 @@
 	PTR_S	zero, sp, PT_R0
 	.endif
 	PTR_S	ra, sp, PT_ERA	/* Save trace function ra at PT_ERA */
+	move	t1, zero
+	PTR_S	t1, sp, PT_R13
 	PTR_ADDI t8, sp, PT_SIZE
 	PTR_S	t8, sp, PT_R3
 	.endm

@@ -104,8 +105,12 @@ ftrace_common_return:
 	PTR_L	a7, sp, PT_R11
 	PTR_L	fp, sp, PT_R22
 	PTR_L	t0, sp, PT_ERA
+	PTR_L	t1, sp, PT_R13
 	PTR_ADDI sp, sp, PT_SIZE
+	bnez	t1, .Ldirect
 	jr	t0
+.Ldirect:
+	jr	t1
 SYM_CODE_END(ftrace_common)
 
 SYM_CODE_START(ftrace_caller)

@@ -147,3 +152,9 @@ SYM_CODE_START(return_to_handler)
 	jr	ra
 SYM_CODE_END(return_to_handler)
 #endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+	jr	t0
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */