Commit 04243787 authored by Linus Torvalds

Merge tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various fix-ups:

   - comment fixes

   - build fix

   - better memory allocation (don't use NR_CPUS)

   - configuration fix

   - build warning fix

   - enhanced callback parameter (to simplify users of trace hooks)

   - give up on stack tracing when RCU isn't watching (it's a lost
     cause)"

* tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Have stack trace not record if RCU is not watching
  tracing: Pass export pointer as argument to ->write()
  ring-buffer: Remove unused function __rb_data_page_index()
  tracing: make PREEMPTIRQ_EVENTS depend on TRACING
  tracing: Allocate mask_str buffer dynamically
  tracing: always define trace_{irq,preempt}_{enable_disable}
  tracing: Fix code comments in trace.c
parents c4f988ee b00d607b
@@ -42,9 +42,11 @@ static struct stm_ftrace {
  * @len:	length of the data packet
  */
 static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
 {
-	stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+	struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+	stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
 }
 static int stm_ftrace_link(struct stm_source_data *data)
...
@@ -18,7 +18,7 @@
  */
 struct trace_export {
 	struct trace_export __rcu	*next;
-	void (*write)(const void *, unsigned int);
+	void (*write)(struct trace_export *, const void *, unsigned int);
 };
 int register_ftrace_export(struct trace_export *export);
...
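For reference, a minimal sketch of what a consumer of this updated interface might look like; the wrapper struct and function names here (my_export, my_export_write) are hypothetical, but the container_of() pattern mirrors the stm_ftrace change above:

#include <linux/kernel.h>
#include <linux/trace.h>

/* Hypothetical driver-private wrapper around the embedded trace_export. */
struct my_export {
	struct trace_export	ftrace;
	/* ... private sink state ... */
};

static void notrace my_export_write(struct trace_export *export,
				    const void *buf, unsigned int len)
{
	/* The export pointer passed by the core lets the callback recover
	 * its enclosing object instead of relying on a global. */
	struct my_export *me = container_of(export, struct my_export, ftrace);

	/* ... forward buf/len to the sink described by 'me' ... */
}

static struct my_export my_export = {
	.ftrace.write	= my_export_write,
};

/* Registered once at init time, e.g.: register_ftrace_export(&my_export.ftrace); */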
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
 #include <trace/define_trace.h>
-#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
 #define trace_irq_enable(...)
 #define trace_irq_disable(...)
-#define trace_preempt_enable(...)
-#define trace_preempt_disable(...)
 #define trace_irq_enable_rcuidle(...)
 #define trace_irq_disable_rcuidle(...)
+#endif
+#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
 #define trace_preempt_enable_rcuidle(...)
 #define trace_preempt_disable_rcuidle(...)
 #endif
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
 	depends on DEBUG_PREEMPT || !PROVE_LOCKING
+	depends on TRACING
 	default n
 	help
 	  Enable tracing of disable and enable events for preemption and irqs.
...
@@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
-static __always_inline void *
-__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
-{
-	return bpage->data + index;
-}
 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
 	return bpage->page->data + index;
...
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
 }
 /**
- * trace_pid_filter_add_remove - Add or remove a task from a pid_list
+ * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
  * @pid_list: The list to modify
  * @self: The current task for fork or NULL for exit
  * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
 }
 /**
- * trace_snapshot - take a snapshot of the current buffer.
+ * tracing_snapshot - take a snapshot of the current buffer.
  *
  * This causes a swap between the snapshot buffer and the current live
  * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 /**
- * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
- * This is similar to trace_snapshot(), but it will allocate the
+ * This is similar to tracing_snapshot(), but it will allocate the
  * snapshot buffer if it isn't already allocated. Use this only
  * where it is safe to sleep, as the allocation may sleep.
  *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly	tracing_thresh;
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export,
 	entry = ring_buffer_event_data(event);
 	size = ring_buffer_event_length(event);
-	export->write(entry, size);
+	export->write(export, entry, size);
 }
 static DEFINE_MUTEX(ftrace_export_lock);
@@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = {
 	.llseek		= seq_lseek,
 };
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	char *mask_str;
 	int len;
-	mutex_lock(&tracing_cpumask_update_lock);
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
-	len = snprintf(mask_str, count, "%*pb\n",
+	len = snprintf(mask_str, len, "%*pb\n",
 		       cpumask_pr_args(tr->tracing_cpumask));
 	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 out_err:
-	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 	return count;
 }
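The two-pass snprintf() idiom used above — first call it with a NULL buffer and zero size to learn the required length, then allocate exactly that much and format for real — is the usual way to replace a fixed NR_CPUS-sized buffer. A minimal standalone C sketch of the idiom, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Format an integer into a freshly sized buffer; caller frees the result. */
static char *format_alloc(int value)
{
	/* First pass: a NULL buffer with size 0 makes snprintf() report how
	 * many characters the output needs (excluding the terminating NUL). */
	int len = snprintf(NULL, 0, "value=%d\n", value) + 1;
	char *buf = malloc(len);

	if (!buf)
		return NULL;
	snprintf(buf, len, "value=%d\n", value);	/* second pass: real write */
	return buf;
}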
@@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
-	mutex_lock(&tracing_cpumask_update_lock);
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	local_irq_enable();
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
 	return count;
...
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
+	/* If rcu is not watching, then save stack trace can fail */
+	if (!rcu_is_watching())
+		goto out;
 	ip += MCOUNT_INSN_SIZE;
 	check_stack(ip, &stack);
...
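The early return above is the general pattern for any ftrace callback that does RCU-dependent work (such as saving a stack trace) and may be invoked while RCU is not watching the CPU, for example deep in the idle path. A hedged sketch of that pattern, using a hypothetical callback name:

#include <linux/ftrace.h>
#include <linux/rcupdate.h>

/* Hypothetical ftrace callback that does RCU-dependent work. */
static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	/* Stack saving and other tracing helpers rely on RCU; bail out if
	 * this CPU is not currently watched (e.g. deep in the idle path). */
	if (!rcu_is_watching())
		return;

	/* ... RCU-dependent work such as saving a stack trace ... */
}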