Commit 81adbdc0 authored by Steven Rostedt, committed by Ingo Molnar

ftrace: only have ftrace_kill atomic

When an anomaly is detected, we need a way to completely disable
ftrace. Right now we have two functions: ftrace_kill and ftrace_kill_atomic.
ftrace_kill tries to do it in a "nice" way by converting everything
back to a nop.

The "nice" way is dangerous itself, so this patch removes it and only
has the "atomic" version, which is all that is needed.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ab9a0918
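The surviving ftrace_kill() is intentionally dumb: it takes no locks, converts nothing back to nops, and simply flips the two flags and resets the tracer callback, which is what makes it safe to call from panic or NMI context. Below is a minimal user-space sketch of that behaviour, assuming the flag and helper names seen in the hunks that follow; the stub tracer and the main() harness are illustrative only, not kernel code.

/* Minimal sketch, not kernel code: models the surviving ftrace_kill(). */
#include <stdio.h>

static int ftrace_disabled;	/* once set, tracing can never be re-enabled */
static int ftrace_enabled;	/* mirrors the sysctl enable flag */

/* do-nothing tracer, in the spirit of the kernel's ftrace_stub */
static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

/* currently installed tracer callback */
static void (*ftrace_trace_function)(unsigned long, unsigned long) = ftrace_stub;

static void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

/*
 * The only kill left after this patch: no mutex, no nop conversion,
 * just flip the flags and reset the callback.  Safe from any context.
 */
static void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

int main(void)
{
	ftrace_trace_function(0, 0);	/* harmless: still points at the stub */
	ftrace_kill();
	printf("ftrace_disabled=%d ftrace_enabled=%d\n",
	       ftrace_disabled, ftrace_enabled);
	return 0;
}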
@@ -40,7 +40,7 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
 # define register_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
-static inline void ftrace_kill_atomic(void) { }
+static inline void ftrace_kill(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -117,7 +117,6 @@ static inline void ftrace_release(void *start, unsigned long size) { }
 /* totally disable ftrace - can not re-enable after this */
 void ftrace_kill(void);
-void ftrace_kill_atomic(void);
 static inline void tracer_disable(void)
 {
@@ -1549,22 +1549,6 @@ int ftrace_force_update(void)
 	return ret;
 }
-static void ftrace_force_shutdown(void)
-{
-	struct task_struct *task;
-	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
-	mutex_lock(&ftraced_lock);
-	task = ftraced_task;
-	ftraced_task = NULL;
-	ftraced_suspend = -1;
-	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
-	if (task)
-		kthread_stop(task);
-}
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -1795,17 +1779,16 @@ core_initcall(ftrace_dynamic_init);
 # define ftrace_shutdown() do { } while (0)
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
-# define ftrace_force_shutdown() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 /**
- * ftrace_kill_atomic - kill ftrace from critical sections
+ * ftrace_kill - kill ftrace
  *
  * This function should be used by panic code. It stops ftrace
  * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
  */
-void ftrace_kill_atomic(void)
+void ftrace_kill(void)
 {
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
@@ -1815,27 +1798,6 @@ void ftrace_kill_atomic(void)
 	clear_ftrace_function();
 }
-/**
- * ftrace_kill - totally shutdown ftrace
- *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
- */
-void ftrace_kill(void)
-{
-	mutex_lock(&ftrace_sysctl_lock);
-	ftrace_disabled = 1;
-	ftrace_enabled = 0;
-	clear_ftrace_function();
-	mutex_unlock(&ftrace_sysctl_lock);
-	/* Try to totally disable ftrace */
-	ftrace_force_shutdown();
-}
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
@@ -3097,7 +3097,7 @@ void ftrace_dump(void)
 	dump_ran = 1;
 	/* No turning back! */
-	ftrace_kill_atomic();
+	ftrace_kill();
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&global_trace.data[cpu]->disabled);