tracing: Clean up the hwlat binding code

Instead of initializing the affinity of the hwlat kthread in the thread
itself, simply set up the initial affinity at thread creation. This
simplifies the code.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 79c6f448
...@@ -266,24 +266,13 @@ static int get_sample(void) ...@@ -266,24 +266,13 @@ static int get_sample(void)
static struct cpumask save_cpumask; static struct cpumask save_cpumask;
static bool disable_migrate; static bool disable_migrate;
static void move_to_next_cpu(bool initmask) static void move_to_next_cpu(void)
{ {
static struct cpumask *current_mask; struct cpumask *current_mask = &save_cpumask;
int next_cpu; int next_cpu;
if (disable_migrate) if (disable_migrate)
return; return;
/* Just pick the first CPU on first iteration */
if (initmask) {
current_mask = &save_cpumask;
get_online_cpus();
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
put_online_cpus();
next_cpu = cpumask_first(current_mask);
goto set_affinity;
}
/* /*
* If for some reason the user modifies the CPU affinity * If for some reason the user modifies the CPU affinity
* of this thread, than stop migrating for the duration * of this thread, than stop migrating for the duration
...@@ -300,7 +289,6 @@ static void move_to_next_cpu(bool initmask) ...@@ -300,7 +289,6 @@ static void move_to_next_cpu(bool initmask)
if (next_cpu >= nr_cpu_ids) if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(current_mask); next_cpu = cpumask_first(current_mask);
set_affinity:
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */ if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
goto disable; goto disable;
...@@ -330,12 +318,10 @@ static void move_to_next_cpu(bool initmask) ...@@ -330,12 +318,10 @@ static void move_to_next_cpu(bool initmask)
static int kthread_fn(void *data) static int kthread_fn(void *data)
{ {
u64 interval; u64 interval;
bool initmask = true;
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
move_to_next_cpu(initmask); move_to_next_cpu();
initmask = false;
local_irq_disable(); local_irq_disable();
get_sample(); get_sample();
...@@ -366,13 +352,27 @@ static int kthread_fn(void *data) ...@@ -366,13 +352,27 @@ static int kthread_fn(void *data)
*/ */
static int start_kthread(struct trace_array *tr) static int start_kthread(struct trace_array *tr)
{ {
struct cpumask *current_mask = &save_cpumask;
struct task_struct *kthread; struct task_struct *kthread;
int next_cpu;
/* Just pick the first CPU on first iteration */
current_mask = &save_cpumask;
get_online_cpus();
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
put_online_cpus();
next_cpu = cpumask_first(current_mask);
kthread = kthread_create(kthread_fn, NULL, "hwlatd"); kthread = kthread_create(kthread_fn, NULL, "hwlatd");
if (IS_ERR(kthread)) { if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n"); pr_err(BANNER "could not start sampling thread\n");
return -ENOMEM; return -ENOMEM;
} }
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
sched_setaffinity(kthread->pid, current_mask);
hwlat_kthread = kthread; hwlat_kthread = kthread;
wake_up_process(kthread); wake_up_process(kthread);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment