Commit 8aaf1ee7 authored by Steven Rostedt (VMware)

tracing: Rename trace_active to disable_stack_tracer and inline its modification

In order to eliminate a function call, make "trace_active" into
"disable_stack_tracer" and convert stack_tracer_disable() and friends into
static inline functions.
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 5367278c
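The mechanical pattern behind the change: the per-CPU counter is defined in one translation unit, exposed to other files with DECLARE_PER_CPU(), and the trivial modifier functions become static inline in the header, so each call site compiles down to a direct per-CPU increment or decrement rather than a function call. A minimal sketch of the idiom, with a hypothetical counter name (my_counter) standing in for disable_stack_tracer:

	#include <linux/percpu.h>

	/* shared header: declare the per-CPU storage, inline the modifier */
	DECLARE_PER_CPU(int, my_counter);

	static inline void my_counter_inc(void)
	{
		/* inlines to a single per-CPU increment at the call site */
		this_cpu_inc(my_counter);
	}

	/* exactly one .c file owns the definition */
	DEFINE_PER_CPU(int, my_counter);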
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos);
 
-void stack_tracer_disable(void);
-void stack_tracer_enable(void);
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There's a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+	/* Preemption or interupts must be disabled */
+	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_dec(disable_stack_tracer);
+}
 #else
 static inline void stack_tracer_disable(void) { }
 static inline void stack_tracer_enable(void) { }
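Per the kernel-doc above, callers bracket a short critical section while preemption or interrupts are already off; preemption must stay disabled so that the matching enable decrements the same CPU's counter. A hypothetical call site (the real users are in the RCU code) might look like:

	preempt_disable_notrace();
	stack_tracer_disable();
	/* ... work that must not be sampled by the stack tracer ... */
	stack_tracer_enable();
	preempt_enable_notrace();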
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-/**
- * stack_tracer_disable - temporarily disable the stack tracer
- *
- * There's a few locations (namely in RCU) where stack tracing
- * cannot be executed. This function is used to disable stack
- * tracing during those critical sections.
- *
- * This function must be called with preemption or interrupts
- * disabled and stack_tracer_enable() must be called shortly after
- * while preemption or interrupts are still disabled.
- */
-void stack_tracer_disable(void)
-{
-	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_inc(trace_active);
-}
-
-/**
- * stack_tracer_enable - re-enable the stack tracer
- *
- * After stack_tracer_disable() is called, stack_tracer_enable()
- * must be called shortly afterward.
- */
-void stack_tracer_enable(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_dec(trace_active);
-}
-
 void stack_trace_print(void)
 {
 	long i;
@@ -243,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 
 	/* no atomic needed, we only modify this variable by this cpu */
-	__this_cpu_inc(trace_active);
-	if (__this_cpu_read(trace_active) != 1)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
@@ -252,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	check_stack(ip, &stack);
 
  out:
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	/*
 	 * In case we trace inside arch_spin_lock() or after (NMI),
 	 * we will cause circular lock, so we also need to increase
-	 * the percpu trace_active here.
+	 * the percpu disable_stack_tracer here.
 	 */
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
 
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }
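One detail worth noting from the trace_stack.c hunks: the renamed counter doubles as the tracer's recursion guard. stack_trace_call() increments it on entry and only proceeds when it reads exactly 1, so a nested invocation on the same CPU, or a section bracketed by stack_tracer_disable(), skips the stack check. Reduced to its essentials (an illustrative sketch, not the kernel source):

	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;	/* nested call or disabled section: bail */
	/* ... perform the actual stack-size check ... */
 out:
	__this_cpu_dec(disable_stack_tracer);

The __this_cpu_*() forms are safe here because preempt_disable_notrace() has already pinned the task to this CPU.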