Commit e92d51af authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:

 - Prevent a potential inconsistency in the perf user space access
   which might lead to evading sanity checks (a sketch of the
   double-fetch pattern follows the perf_copy_attr hunk below).

 - Prevent perf from recording function trace entries twice (a
   simplified sketch of the fixed dispatch follows the ftrace hunks
   below).

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/ftrace: Fix double traces of perf on ftrace:function
  perf/core: Fix potential double-fetch bug
parents d0d6ab53 75e83876
@@ -1201,7 +1201,7 @@ extern void perf_event_init(void);
 extern void perf_tp_event(u16 event_type, u64 count, void *record,
                           int entry_size, struct pt_regs *regs,
                           struct hlist_head *head, int rctx,
-                          struct task_struct *task);
+                          struct task_struct *task, struct perf_event *event);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
...
@@ -508,9 +508,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
-                      struct task_struct *task)
+                      struct task_struct *task, struct perf_event *event)
 {
-        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
+        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
 }
 #endif
...
@@ -7906,16 +7906,15 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                 }
         }
         perf_tp_event(call->event.type, count, raw_data, size, regs, head,
-                      rctx, task);
+                      rctx, task, NULL);
 }
 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
 
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                    struct pt_regs *regs, struct hlist_head *head, int rctx,
-                   struct task_struct *task)
+                   struct task_struct *task, struct perf_event *event)
 {
         struct perf_sample_data data;
-        struct perf_event *event;
         struct perf_raw_record raw = {
                 .frag = {
@@ -7929,10 +7928,16 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 
         perf_trace_buf_update(record, event_type);
 
-        hlist_for_each_entry_rcu(event, head, hlist_entry) {
-                if (perf_tp_event_match(event, &data, regs))
-                        perf_swevent_event(event, count, &data, regs);
+        /* Use the given event instead of the hlist */
+        if (event) {
+                if (perf_tp_event_match(event, &data, regs))
+                        perf_swevent_event(event, count, &data, regs);
+        } else {
+                hlist_for_each_entry_rcu(event, head, hlist_entry) {
+                        if (perf_tp_event_match(event, &data, regs))
+                                perf_swevent_event(event, count, &data, regs);
+                }
         }
 
         /*
          * If we got specified a target task, also iterate its context and
@@ -9611,6 +9616,8 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
         if (ret)
                 return -EFAULT;
 
+        attr->size = size;
+
         if (attr->__reserved_1)
                 return -EINVAL;
...
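The perf_copy_attr() hunk above is the user-space access fix: the attribute size is fetched from user memory and validated first, and the full structure is copied in a second access, so user space can change the size field between the two reads. Writing the validated value back (attr->size = size) removes that inconsistency. A minimal user-space sketch of the double-fetch pattern, with hypothetical names and types rather than the actual kernel code:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for struct perf_event_attr. */
struct attr {
        uint32_t size;
        uint64_t config;
};

/*
 * "usrc" models memory that another thread can modify concurrently
 * (in the kernel case, a user-space page).
 */
static int copy_attr(const volatile struct attr *usrc, struct attr *dst)
{
        uint32_t size = usrc->size;             /* first fetch: size only */

        if (size > sizeof(*dst))                /* sanity check the fetched size */
                return -1;

        memset(dst, 0, sizeof(*dst));
        memcpy(dst, (const void *)usrc, size);  /* second fetch: whole struct */

        /*
         * usrc->size may have changed between the two fetches; keep the
         * value that was actually validated, mirroring the added
         * "attr->size = size;" in perf_copy_attr().
         */
        dst->size = size;
        return 0;
}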
@@ -306,6 +306,7 @@ static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
+        struct perf_event *event;
         struct ftrace_entry *entry;
         struct hlist_head *head;
         struct pt_regs regs;
@@ -329,8 +330,9 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
         entry->ip = ip;
         entry->parent_ip = parent_ip;
+        event = container_of(ops, struct perf_event, ftrace_ops);
         perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
-                              1, &regs, head, NULL);
+                              1, &regs, head, NULL, event);
 
 #undef ENTRY_SIZE
 }
...
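The hunks above and the remaining ones make up the double-trace fix. Each perf event on ftrace:function registers its own ftrace_ops callback, and every callback used to submit into the shared hlist, so with two such events active each function hit was delivered to both events by both callbacks and ended up recorded twice. After the change, perf_ftrace_function_call() recovers its owning event with container_of() and passes it down, and perf_tp_event() delivers only to that event, falling back to the hlist walk when no event is given. A simplified, self-contained sketch of that dispatch, using hypothetical types rather than the kernel structures:

#include <stdio.h>

/* Hypothetical, simplified stand-in for a perf event on a plain list. */
struct event {
        const char   *name;
        unsigned long count;
        struct event *next;
};

/*
 * Mirrors the fixed perf_tp_event() dispatch: deliver to the given
 * event if there is one, otherwise walk the whole list.
 */
static void tp_event(struct event *list, struct event *event)
{
        if (event) {
                event->count++;         /* use the given event instead of the list */
        } else {
                for (struct event *e = list; e; e = e->next)
                        e->count++;
        }
}

int main(void)
{
        struct event b = { "b", 0, NULL };
        struct event a = { "a", 0, &b };        /* shared list: a -> b */

        /*
         * One function hit fires both registered callbacks. Before the
         * fix each callback did the list walk (tp_event(&a, NULL) twice),
         * so every event got two samples per hit; now each callback
         * passes its own event and every event is counted exactly once.
         */
        tp_event(&a, &a);
        tp_event(&a, &b);

        printf("a=%lu b=%lu\n", a.count, b.count);      /* prints: a=1 b=1 */
        return 0;
}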
@@ -1200,7 +1200,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
         memset(&entry[1], 0, dsize);
         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                              head, NULL);
+                              head, NULL, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
@@ -1236,7 +1236,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 
         entry->ret_ip = (unsigned long)ri->ret_addr;
         store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                              head, NULL);
+                              head, NULL, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
 #endif /* CONFIG_PERF_EVENTS */
...
@@ -596,7 +596,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                                (unsigned long *)&rec->args);
         perf_trace_buf_submit(rec, size, rctx,
                               sys_data->enter_event->event.type, 1, regs,
-                              head, NULL);
+                              head, NULL, NULL);
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -667,7 +667,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
         rec->nr = syscall_nr;
         rec->ret = syscall_get_return_value(current, regs);
         perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
-                              1, regs, head, NULL);
+                              1, regs, head, NULL, NULL);
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
...
@@ -1156,7 +1156,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
         }
 
         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                              head, NULL);
+                              head, NULL, NULL);
 out:
         preempt_enable();
 }
...