Commit 29db00c2 authored by Linus Torvalds

Merge tag 'trace-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Do not allow histogram values to have modifiers. They can cause a NULL
   pointer dereference if they do.

 - Warn if hist_field_name() is passed a NULL field. This prevents the NULL
   pointer dereference mentioned above.

 - Fix invalid address access in lookup_rec() when a record page is empty (index is 0)

 - Define ftrace_stub_graph conditionally to prevent linker errors

 - Always check if RCU is watching at all tracepoint locations

* tag 'trace-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Make tracepoint lockdep check actually test something
  ftrace,kcfi: Define ftrace_stub_graph conditionally
  ftrace: Fix invalid address access in lookup_rec() when index is 0
  tracing: Check field value in hist_field_name()
  tracing: Do not let histogram values have some modifiers
parents ed38ff16 c2679254
@@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub)
 	RET
 SYM_FUNC_END(ftrace_stub)
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 SYM_TYPED_FUNC_START(ftrace_stub_graph)
 	CALL_DEPTH_ACCOUNT
 	RET
 SYM_FUNC_END(ftrace_stub_graph)
+#endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -242,12 +242,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  *
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)	\
 	extern int __traceiter_##name(data_proto);		\
@@ -260,9 +259,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 				TP_ARGS(args),			\
 				TP_CONDITION(cond), 0);		\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {	\
-			rcu_read_lock_sched_notrace();		\
-			rcu_dereference_sched(__tracepoint_##name.funcs);\
-			rcu_read_unlock_sched_notrace();	\
+			WARN_ON_ONCE(!rcu_is_watching());	\
 		}						\
 	}							\
 	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),	\
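The hunk above changes what the lockdep-only branch in __DECLARE_TRACE() does: instead of taking and releasing an RCU-sched read lock around a dummy dereference (which, per the "Make tracepoint lockdep check actually test something" commit, did not really verify anything), every tracepoint call site now warns once if RCU is not watching, even while the tracepoint itself is disabled. Below is a rough userspace sketch of that idea; rcu_is_watching(), WARN_ON_ONCE() and the tracepoint wrapper are all simplified stand-ins, not the kernel implementations.

/* Userspace mock of the lockdep-time RCU check at a tracepoint call site.
 * Every name here is a stand-in, not the real kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

static bool rcu_watching = true;	/* stand-in for the RCU "watching" state */

#define WARN_ON_ONCE(cond) do {					\
	static bool __warned;					\
	if ((cond) && !__warned) {				\
		__warned = true;				\
		fprintf(stderr, "WARNING: %s\n", #cond);	\
	}							\
} while (0)

static bool rcu_is_watching(void)
{
	return rcu_watching;
}

/* Mirrors the shape of the lockdep branch in __DECLARE_TRACE(): the check
 * runs whenever the static condition holds, even with no probe attached. */
static void trace_sample_event(int value)
{
	bool tracepoint_enabled = false;	/* no probes registered */
	bool cond = true;			/* TP_CONDITION() stand-in */

	if (cond)
		WARN_ON_ONCE(!rcu_is_watching());

	if (tracepoint_enabled && cond) {
		/* probe functions would be iterated here under RCU */
	}

	(void)value;
}

int main(void)
{
	trace_sample_event(1);		/* RCU watching: silent */

	rcu_watching = false;		/* e.g. called from a deep idle path */
	trace_sample_event(2);		/* warns even though tracing is off */
	return 0;
}

The point of running the check unconditionally under lockdep is that a tracepoint placed where RCU is inactive (idle paths, for example) gets flagged during testing even if nobody has enabled that event yet.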
@@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 	key.flags = end;	/* overload flags, as it is unsigned long */
 
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		if (end < pg->records[0].ip ||
+		if (pg->index == 0 ||
+		    end < pg->records[0].ip ||
 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
 			continue;
 		rec = bsearch(&key, pg->records, pg->index,
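For context, lookup_rec() scans the ftrace record pages and skips any page whose address range cannot contain [start, end]. With the old test, a page holding no records (pg->index == 0) still had pg->records[0].ip and pg->records[pg->index - 1].ip read, i.e. an entry that was never filled in and an out-of-bounds records[-1]. Checking pg->index first short-circuits before either access. Here is a minimal userspace sketch of the guarded bounds check; the struct layout, names and MCOUNT_INSN_SIZE value are simplified placeholders, and the empty page uses a NULL records pointer just to make the failure mode obvious.

#include <stdio.h>

#define MCOUNT_INSN_SIZE 5	/* placeholder; arch-dependent in the kernel */

struct rec {
	unsigned long ip;
};

struct rec_page {
	struct rec *records;
	int index;		/* number of valid entries in records[] */
};

/* Return nonzero if [start, end] may overlap the records stored in @pg.
 * Testing pg->index first is the whole fix: an empty page is skipped
 * before records[0] or records[index - 1] is ever touched. */
static int page_may_contain(struct rec_page *pg, unsigned long start,
			    unsigned long end)
{
	if (pg->index == 0 ||
	    end < pg->records[0].ip ||
	    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
		return 0;
	return 1;
}

int main(void)
{
	struct rec recs[] = { { 0x1000 }, { 0x2000 } };
	struct rec_page full  = { recs, 2 };
	struct rec_page empty = { NULL, 0 };	/* old check would read records[0] here */

	printf("full page:  %d\n", page_may_contain(&full, 0x1000, 0x1004));
	printf("empty page: %d\n", page_may_contain(&empty, 0x1000, 0x1004));
	return 0;
}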
@@ -1331,6 +1331,9 @@ static const char *hist_field_name(struct hist_field *field,
 {
 	const char *field_name = "";
 
+	if (WARN_ON_ONCE(!field))
+		return field_name;
+
 	if (level > 1)
 		return field_name;
 
@@ -4235,6 +4238,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
 		goto out;
 	}
 
+	/* Some types cannot be a value */
+	if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+				 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+				 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+				 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+		hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+		ret = -EINVAL;
+	}
+
 	hist_data->fields[val_idx] = hist_field;
 	++hist_data->n_vals;
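The second hunk above is a plain flag-mask test: modifiers that only make sense for keys or for display (.log2, .buckets, .sym, .sym-offset, .syscall, .percent, .graph, and stack traces) are rejected when they appear on a histogram value, with a "bad field modifier" error and -EINVAL, instead of producing a field that later hands a NULL to hist_field_name() (which the first hunk now also guards against). A small userspace sketch of the same pattern, using made-up flag values rather than the kernel's HIST_FIELD_FL_* constants:

#include <stdio.h>

/* Made-up bit values; the kernel's HIST_FIELD_FL_* flags differ. */
#define FL_LOG2		(1 << 0)
#define FL_BUCKET	(1 << 1)
#define FL_SYM		(1 << 2)
#define FL_SYM_OFFSET	(1 << 3)
#define FL_SYSCALL	(1 << 4)
#define FL_PERCENT	(1 << 5)
#define FL_GRAPH	(1 << 6)
#define FL_STACKTRACE	(1 << 7)

/* Modifiers that only make sense for keys or for display, never for a
 * value that gets summed per bucket. */
#define FL_NOT_A_VALUE	(FL_LOG2 | FL_BUCKET | FL_SYM | FL_SYM_OFFSET | \
			 FL_SYSCALL | FL_PERCENT | FL_GRAPH | FL_STACKTRACE)

static int check_val_field(unsigned long flags)
{
	if (flags & FL_NOT_A_VALUE)
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("plain value:         %d\n", check_val_field(0));
	printf("value with .buckets: %d\n", check_val_field(FL_BUCKET));
	return 0;
}

In practice this means a hist trigger that attaches one of these modifiers to a value (for example a value spelled with .buckets=10 or .log2, given here purely as an illustration rather than taken from the commit) now fails at setup time instead of crashing later.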