Commit ebc27aac authored by Linus Torvalds

Merge tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Fix some missing-prototype warnings

 - Fix user events struct args (did not include size of struct)

   When creating a user event, the "struct" keyword denotes that the
   size of the field will be passed in as well. But the parsing failed
   to handle this case.
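
   For example (this format is exercised by the new selftest below), a
   struct field is declared with its size appended, and both sides of a
   match must now agree on that size:

       __test_event struct my_struct a 20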

 - Add selftest to struct sizes for user events

 - Fix sample code for direct trampolines.

   The sample code for direct trampolines attaches to handle_mm_fault(),
   but that function's prototype changed and the direct trampoline
   sample code was not updated to match. Direct trampolines need to
   have the arguments correct, otherwise they can fail or even crash
   the system.
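
   For reference, the current prototype the sample (and its trampolines,
   which now save and restore the extra argument) has to match is:

       vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
                                  unsigned long address, unsigned int flags,
                                  struct pt_regs *regs);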

 - Remove unused ftrace_regs_caller_ret() prototype.

 - Quiet false positive of FORTIFY_SOURCE

   Due to backward compatibility, the structure used to save stack
   traces in the kernel had a fixed size of 8. This structure is
   exported to user space via the tracing format file. A change was made
   to allow more than 8 functions to be recorded, and user space now
   uses the size field to know how many functions are actually in the
   stack.

   But the structure still has a fixed size of 8 (even though it points
   into the ring buffer, which has the required amount allocated to
   hold a full stack).

   This was fine until the fortifier noticed that the size passed to
   memcpy(&entry->caller, stack, size) could be greater than those 8
   entries, and complained about it at runtime.

   Hide this by using a pointer to the stack location on the ring
   buffer instead of using the address of the entry structure's caller
   field.
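
   In effect, the change below replaces

       memcpy(&entry->caller, fstack->calls, size);

   with the equivalent

       void *ptr = ring_buffer_event_data(event);
       ...
       ptr += offsetof(typeof(*entry), caller);
       memcpy(ptr, fstack->calls, size);

   so the fortifier no longer sees a fixed-size destination.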

 - Fix a deadloop in reading trace_pipe that was caused by a mismatch:
   ring_buffer_empty() returned false, which triggered a read of the
   data, but the read code uses rb_num_of_entries(), which returned
   zero, causing an infinite "retry".

 - Fix a warning caused by not using all the pages allocated to store
   ftrace functions. This can happen if the linker inserts a bunch of
   "NULL" entries, throwing off the accounting of how many pages are
   needed.

 - Fix histogram synthetic event crashing when the start event is
   removed and the end event is still using a variable from it

 - Fix memory leak in freeing iter->temp in tracing_release_pipe()

* tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Fix memory leak of iter->temp when reading trace_pipe
  tracing/histograms: Add histograms to hist_vars if they have referenced variables
  tracing: Stop FORTIFY_SOURCE complaining about stack trace caller
  ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()
  ring-buffer: Fix deadloop issue on reading trace_pipe
  tracing: arm64: Avoid missing-prototype warnings
  selftests/user_events: Test struct size match cases
  tracing/user_events: Fix struct arg size match check
  x86/ftrace: Remove unused extern declaration ftrace_regs_caller_ret()
  arm64: ftrace: Add direct call trampoline samples support
  samples: ftrace: Save required argument registers in sample trampolines
parents 15999328 d5a82189
@@ -197,6 +197,8 @@ config ARM64
                                    !CC_OPTIMIZE_FOR_SIZE)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_ARGS
+       select HAVE_SAMPLE_FTRACE_DIRECT
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select HAVE_FAST_GUP
        select HAVE_FTRACE_MCOUNT_RECORD
......
@@ -211,6 +211,10 @@ static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs
 {
        return ret_regs->fp;
 }
+
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+                          unsigned long frame_pointer);
+
 #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
 #endif
......
@@ -85,4 +85,7 @@ static inline int syscall_get_arch(struct task_struct *task)
        return AUDIT_ARCH_AARCH64;
 }
 
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
 #endif /* __ASM_SYSCALL_H */
@@ -75,9 +75,6 @@ static inline bool has_syscall_work(unsigned long flags)
        return unlikely(flags & _TIF_SYSCALL_WORK);
 }
 
-int syscall_trace_enter(struct pt_regs *regs);
-void syscall_trace_exit(struct pt_regs *regs);
-
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                           const syscall_fn_t syscall_table[])
 {
......
@@ -282,7 +282,6 @@ static inline void tramp_free(void *tramp) { }
 
 /* Defined as markers to the end of the ftrace default trampolines */
 extern void ftrace_regs_caller_end(void);
-extern void ftrace_regs_caller_ret(void);
 extern void ftrace_caller_end(void);
 extern void ftrace_caller_op_ptr(void);
 extern void ftrace_regs_caller_op_ptr(void);
......
@@ -41,6 +41,15 @@ struct ftrace_ops;
 struct ftrace_regs;
 struct dyn_ftrace;
 
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+struct fgraph_ret_regs;
+unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
  * If the arch's mcount caller does not support all of ftrace's
......
@@ -15,6 +15,7 @@
 #include <trace/events/sched.h>
 
 #include "ftrace_internal.h"
+#include "trace.h"
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ASSIGN_OPS_HASH(opsname, val) \
......
@@ -3305,6 +3305,22 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
        return cnt;
 }
 
+static void ftrace_free_pages(struct ftrace_page *pages)
+{
+       struct ftrace_page *pg = pages;
+
+       while (pg) {
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
+               pages = pg->next;
+               kfree(pg);
+               pg = pages;
+               ftrace_number_of_groups--;
+       }
+}
+
 static struct ftrace_page *
 ftrace_allocate_pages(unsigned long num_to_init)
 {
@@ -3343,17 +3359,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return start_pg;
 
  free_pages:
-       pg = start_pg;
-       while (pg) {
-               if (pg->records) {
-                       free_pages((unsigned long)pg->records, pg->order);
-                       ftrace_number_of_pages -= 1 << pg->order;
-               }
-               start_pg = pg->next;
-               kfree(pg);
-               pg = start_pg;
-               ftrace_number_of_groups--;
-       }
+       ftrace_free_pages(start_pg);
        pr_info("ftrace: FAILED to allocate memory for functions\n");
        return NULL;
 }
@@ -6471,9 +6477,11 @@ static int ftrace_process_locs(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
 {
+       struct ftrace_page *pg_unuse = NULL;
        struct ftrace_page *start_pg;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
+       unsigned long skipped = 0;
        unsigned long count;
        unsigned long *p;
        unsigned long addr;
@@ -6536,8 +6544,10 @@ static int ftrace_process_locs(struct module *mod,
                 * object files to satisfy alignments.
                 * Skip any NULL pointers.
                 */
-               if (!addr)
+               if (!addr) {
+                       skipped++;
                        continue;
+               }
 
                end_offset = (pg->index+1) * sizeof(pg->records[0]);
                if (end_offset > PAGE_SIZE << pg->order) {
@@ -6551,8 +6561,10 @@ static int ftrace_process_locs(struct module *mod,
                rec->ip = addr;
        }
 
-       /* We should have used all pages */
-       WARN_ON(pg->next);
+       if (pg->next) {
+               pg_unuse = pg->next;
+               pg->next = NULL;
+       }
 
        /* Assign the last page to ftrace_pages */
        ftrace_pages = pg;
@@ -6574,6 +6586,11 @@ static int ftrace_process_locs(struct module *mod,
  out:
        mutex_unlock(&ftrace_lock);
 
+       /* We should have used all pages unless we skipped some */
+       if (pg_unuse) {
+               WARN_ON(!skipped);
+               ftrace_free_pages(pg_unuse);
+       }
        return ret;
 }
......
@@ -2,6 +2,9 @@
 #ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
 #define _LINUX_KERNEL_FTRACE_INTERNAL_H
 
+int __register_ftrace_function(struct ftrace_ops *ops);
+int __unregister_ftrace_function(struct ftrace_ops *ops);
+
 #ifdef CONFIG_FUNCTION_TRACER
 
 extern struct mutex ftrace_lock;
@@ -15,8 +18,6 @@ int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
 
 #else /* !CONFIG_DYNAMIC_FTRACE */
 
-int __register_ftrace_function(struct ftrace_ops *ops);
-int __unregister_ftrace_function(struct ftrace_ops *ops);
 /* Keep as macros so we do not need to define the commands */
 # define ftrace_startup(ops, command)          \
        ({                                      \
......
@@ -5242,28 +5242,34 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
 
+static void rb_clear_buffer_page(struct buffer_page *page)
+{
+       local_set(&page->write, 0);
+       local_set(&page->entries, 0);
+       rb_init_page(page->page);
+       page->read = 0;
+}
+
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 {
+       struct buffer_page *page;
+
        rb_head_page_deactivate(cpu_buffer);
 
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
-       local_set(&cpu_buffer->head_page->write, 0);
-       local_set(&cpu_buffer->head_page->entries, 0);
-       local_set(&cpu_buffer->head_page->page->commit, 0);
-
-       cpu_buffer->head_page->read = 0;
+       rb_clear_buffer_page(cpu_buffer->head_page);
+       list_for_each_entry(page, cpu_buffer->pages, list) {
+               rb_clear_buffer_page(page);
+       }
 
        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        INIT_LIST_HEAD(&cpu_buffer->new_pages);
-       local_set(&cpu_buffer->reader_page->write, 0);
-       local_set(&cpu_buffer->reader_page->entries, 0);
-       local_set(&cpu_buffer->reader_page->page->commit, 0);
-       cpu_buffer->reader_page->read = 0;
+       rb_clear_buffer_page(cpu_buffer->reader_page);
 
        local_set(&cpu_buffer->entries_bytes, 0);
        local_set(&cpu_buffer->overrun, 0);
......
@@ -3118,6 +3118,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
        struct ftrace_stack *fstack;
        struct stack_entry *entry;
        int stackidx;
+       void *ptr;
 
        /*
         * Add one, for this function and the call to save_stack_trace()
@@ -3161,9 +3162,25 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                            trace_ctx);
        if (!event)
                goto out;
-       entry = ring_buffer_event_data(event);
+       ptr = ring_buffer_event_data(event);
+       entry = ptr;
+
+       /*
+        * For backward compatibility reasons, the entry->caller is an
+        * array of 8 slots to store the stack. This is also exported
+        * to user space. The amount allocated on the ring buffer actually
+        * holds enough for the stack specified by nr_entries. This will
+        * go into the location of entry->caller. Due to string fortifiers
+        * checking the size of the destination of memcpy() it triggers
+        * when it detects that size is greater than 8. To hide this from
+        * the fortifiers, we use "ptr" and pointer arithmetic to assign caller.
+        *
+        * The below is really just:
+        *   memcpy(&entry->caller, fstack->calls, size);
+        */
+       ptr += offsetof(typeof(*entry), caller);
+       memcpy(ptr, fstack->calls, size);
 
-       memcpy(&entry->caller, fstack->calls, size);
        entry->size = nr_entries;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
@@ -6764,6 +6781,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
        free_cpumask_var(iter->started);
        kfree(iter->fmt);
+       kfree(iter->temp);
        mutex_destroy(&iter->mutex);
        kfree(iter);
......
@@ -6663,13 +6663,15 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
        if (get_named_trigger_data(trigger_data))
                goto enable;
 
-       if (has_hist_vars(hist_data))
-               save_hist_vars(hist_data);
-
        ret = create_actions(hist_data);
        if (ret)
                goto out_unreg;
 
+       if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+               if (save_hist_vars(hist_data))
+                       goto out_unreg;
+       }
+
        ret = tracing_map_init(hist_data->map);
        if (ret)
                goto out_unreg;
......
@@ -1317,6 +1317,9 @@ static int user_field_set_string(struct ftrace_event_field *field,
                pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
 
+       if (str_has_prefix(field->type, "struct "))
+               pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
+
        if (colon)
                pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
......
 // SPDX-License-Identifier: GPL-2.0
+#include "trace_kprobe_selftest.h"
+
 /*
  * Function used during the kprobe self test. This function is in a separate
  * compile unit so it can be compile with CC_FLAGS_FTRACE to ensure that it
......
@@ -2,7 +2,9 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func1(void);
 extern void my_direct_func2(void);
@@ -96,6 +98,38 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      bti     c\n"
+"      sub     sp, sp, #16\n"
+"      stp     x9, x30, [sp]\n"
+"      bl      my_direct_func1\n"
+"      ldp     x30, x9, [sp]\n"
+"      add     sp, sp, #16\n"
+"      ret     x9\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      bti     c\n"
+"      sub     sp, sp, #16\n"
+"      stp     x9, x30, [sp]\n"
+"      bl      my_direct_func2\n"
+"      ldp     x30, x9, [sp]\n"
+"      add     sp, sp, #16\n"
+"      ret     x9\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
......
@@ -2,7 +2,9 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func1(unsigned long ip);
 extern void my_direct_func2(unsigned long ip);
@@ -103,6 +105,44 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func1\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func2\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 #include <asm/asm.h>
......
@@ -4,7 +4,9 @@
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
 #include <linux/sched/stat.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func(unsigned long ip);
@@ -66,6 +68,29 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      mov     x0, x30\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 #include <asm/asm.h>
......
@@ -3,16 +3,18 @@
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
-extern void my_direct_func(struct vm_area_struct *vma,
-                          unsigned long address, unsigned int flags);
+extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+                          unsigned int flags, struct pt_regs *regs);
 
-void my_direct_func(struct vm_area_struct *vma,
-                   unsigned long address, unsigned int flags)
+void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+                   unsigned int flags, struct pt_regs *regs)
 {
-       trace_printk("handle mm fault vma=%p address=%lx flags=%x\n",
-                    vma, address, flags);
+       trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
+                    vma, address, flags, regs);
 }
 
 extern void my_tramp(void *);
@@ -34,7 +36,9 @@ asm (
 "      pushq %rdi\n"
 "      pushq %rsi\n"
 "      pushq %rdx\n"
+"      pushq %rcx\n"
 "      call my_direct_func\n"
+"      popq %rcx\n"
 "      popq %rdx\n"
 "      popq %rsi\n"
 "      popq %rdi\n"
@@ -70,6 +74,30 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #48\n"
+"      stp     x9, x30, [sp]\n"
+"      stp     x0, x1, [sp, #16]\n"
+"      stp     x2, x3, [sp, #32]\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldp     x0, x1, [sp, #16]\n"
+"      ldp     x2, x3, [sp, #32]\n"
+"      add     sp, sp, #48\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
......
@@ -3,7 +3,9 @@
 #include <linux/sched.h> /* for wake_up_process() */
 #include <linux/ftrace.h>
+#ifndef CONFIG_ARM64
 #include <asm/asm-offsets.h>
+#endif
 
 extern void my_direct_func(struct task_struct *p);
@@ -63,6 +65,28 @@ asm (
 
 #endif /* CONFIG_S390 */
 
+#ifdef CONFIG_ARM64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      bti     c\n"
+"      sub     sp, sp, #32\n"
+"      stp     x9, x30, [sp]\n"
+"      str     x0, [sp, #16]\n"
+"      bl      my_direct_func\n"
+"      ldp     x30, x9, [sp]\n"
+"      ldr     x0, [sp, #16]\n"
+"      add     sp, sp, #32\n"
+"      ret     x9\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_ARM64 */
+
 #ifdef CONFIG_LOONGARCH
 
 asm (
......
@@ -217,6 +217,18 @@ TEST_F(user, matching) {
        /* Types don't match */
        TEST_NMATCH("__test_event u64 a; u64 b",
                    "__test_event u32 a; u32 b");
+
+       /* Struct name and size matches */
+       TEST_MATCH("__test_event struct my_struct a 20",
+                  "__test_event struct my_struct a 20");
+
+       /* Struct name don't match */
+       TEST_NMATCH("__test_event struct my_struct a 20",
+                   "__test_event struct my_struct b 20");
+
+       /* Struct size don't match */
+       TEST_NMATCH("__test_event struct my_struct a 20",
+                   "__test_event struct my_struct a 21");
 }
 
 int main(int argc, char **argv)
......