x86/ftrace: Do not jump to direct code in created trampolines

When creating a trampoline based on the ftrace_regs_caller code, nop out the
jnz test that would jump to the code that returns to a direct caller (stored
in the ORIG_RAX field) instead of back to the function that called it.

Link: http://lkml.kernel.org/r/20200422162750.638839749@goodmis.org

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
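
As a reference for the patching step in the diff below, here is a minimal user-space sketch of the byte rewrite that the new create_trampoline() hunk performs. The buffer contents, the jmp_offset value, and the particular 2-byte NOP (0x66 0x90) are illustrative assumptions for this sketch only; the kernel does the same 0x75 opcode check, but it writes its runtime-selected ideal_nops[2] sequence into the freshly allocated trampoline rather than memcpy()ing into a static array.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for a freshly copied trampoline: a "testq %rax, %rax"
 * followed by a 2-byte short "jnz" (opcode 0x75), as found at the
 * ftrace_regs_caller_jmp label.
 */
static uint8_t tramp[] = {
        0x48, 0x85, 0xc0,       /* testq %rax, %rax */
        0x75, 0x10,             /* jnz   +0x10 (2-byte short jump) */
};

int main(void)
{
        size_t jmp_offset = 3;                  /* offset of the jnz (assumed) */
        const uint8_t nop2[2] = { 0x66, 0x90 }; /* one common 2-byte NOP */

        /* Same sanity check as the patch: only a 2-byte short jnz is expected. */
        if (tramp[jmp_offset] != 0x75)
                return 1;

        /* NOP the jump so this copy never takes the direct-call return path. */
        memcpy(&tramp[jmp_offset], nop2, sizeof(nop2));

        printf("patched bytes: %02x %02x\n", tramp[jmp_offset], tramp[jmp_offset + 1]);
        return 0;
}

Because the jump is only two bytes long, it can be overwritten in place with a two-byte NOP; that is why the patch warns and bails out if the byte at jmp_offset is not the 0x75 short-jnz opcode.
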
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -286,6 +286,7 @@ extern void ftrace_regs_caller_ret(void);
 extern void ftrace_caller_end(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
+extern void ftrace_regs_caller_jmp(void);
 
 /* movq function_trace_op(%rip), %rdx */
 /* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
@@ -316,6 +317,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	unsigned long end_offset;
 	unsigned long op_offset;
 	unsigned long call_offset;
+	unsigned long jmp_offset;
 	unsigned long offset;
 	unsigned long npages;
 	unsigned long size;
@@ -333,11 +335,13 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 		end_offset = (unsigned long)ftrace_regs_caller_end;
 		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
 		call_offset = (unsigned long)ftrace_regs_call;
+		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
 	} else {
 		start_offset = (unsigned long)ftrace_caller;
 		end_offset = (unsigned long)ftrace_caller_end;
 		op_offset = (unsigned long)ftrace_caller_op_ptr;
 		call_offset = (unsigned long)ftrace_call;
+		jmp_offset = 0;
 	}
 
 	size = end_offset - start_offset;
@@ -367,6 +371,17 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	if (WARN_ON(ret < 0))
 		goto fail;
 
+	/* No need to test direct calls on created trampolines */
+	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
+		ip = trampoline + (jmp_offset - start_offset);
+		if (WARN_ON(*(char *)ip != 0x75))
+			goto fail;
+		ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
+		if (ret < 0)
+			goto fail;
+	}
+
 	/*
 	 * The address of the ftrace_ops that is used for this trampoline
 	 * is stored at the end of the trampoline. This will be used to

--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -241,6 +241,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
 	 */
 	movq ORIG_RAX(%rsp), %rax
 	testq	%rax, %rax
+SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
 	jnz	1f
 
 	restore_mcount_regs