Commit 9e01c1b7 authored by Rusty Russell

cpumask: convert kernel trace functions

Impact: Reduce future memory usage, use new cpumask API.

(Eventually, cpumask_var_t will be allocated based on nr_cpu_ids, not NR_CPUS).

Convert kernel trace functions to use struct cpumask API:
1) Use cpumask_copy/cpumask_test_cpu/for_each_cpu.
2) Use cpumask_var_t and alloc_cpumask_var/free_cpumask_var everywhere.
3) Use on_each_cpu instead of playing with current->cpus_allowed.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
parent 333af153
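
Points 1) and 2) of the commit message boil down to the allocation/copy/test/free pattern sketched below. This is an illustrative, self-contained fragment, not code from this patch: the demo_mask variable and the demo_init/demo_exit pair are invented for the example, while the cpumask calls themselves (alloc_cpumask_var, cpumask_copy, cpumask_test_cpu, for_each_cpu, free_cpumask_var) are the API the patch converts to.

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/*
 * With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and
 * alloc_cpumask_var() really allocates; otherwise it is an ordinary
 * struct cpumask and the alloc/free calls are no-ops that always
 * succeed.  Code written this way works in both configurations.
 */
static cpumask_var_t demo_mask;

static int __init demo_init(void)
{
	int cpu;

	/* Was: "static cpumask_t demo_mask;" with direct assignment. */
	if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(demo_mask, cpu_possible_mask);	/* not "= cpu_possible_map" */

	for_each_cpu(cpu, demo_mask) {			/* not for_each_cpu_mask() */
		if (cpumask_test_cpu(cpu, demo_mask))	/* not cpu_isset() */
			pr_info("cpu %d set in demo_mask\n", cpu);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	free_cpumask_var(demo_mask);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The memory saving mentioned in the Impact line follows from this: once everything goes through cpumask_var_t, the off-stack variant can size each allocation by nr_cpu_ids (the CPUs actually present) rather than the compile-time NR_CPUS maximum.
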
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -267,7 +267,7 @@ struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
 	atomic_t			record_disabled;
 
 	struct mutex			mutex;
@@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
@@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
@@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
@@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_t __read_mostly		tracing_buffer_mask;
+static cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
-	for_each_cpu_mask(cpu, tracing_buffer_mask)
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = {
 /*
  * Only trace on a CPU if the bitmask is set:
  */
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
 
 /*
  * The tracer itself will not take this lock, but still we want
@@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	int err, cpu;
+	cpumask_var_t tracing_cpumask_new;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
 
 	mutex_lock(&tracing_cpumask_update_lock);
-	err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
@@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 * Increase/decrease the disabled counter if we are
 		 * about to flip a bit in the cpumask:
 		 */
-		if (cpu_isset(cpu, tracing_cpumask) &&
-				!cpu_isset(cpu, tracing_cpumask_new)) {
+		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&global_trace.data[cpu]->disabled);
 		}
-		if (!cpu_isset(cpu, tracing_cpumask) &&
-				cpu_isset(cpu, tracing_cpumask_new)) {
+		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	tracing_cpumask = tracing_cpumask_new;
+	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
 err_unlock:
 	mutex_unlock(&tracing_cpumask_update_lock);
+	free_cpumask_var(tracing_cpumask);
 	return err;
 }
@@ -3752,7 +3752,6 @@ void ftrace_dump(void)
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static cpumask_t mask;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -3786,8 +3785,6 @@ void ftrace_dump(void)
 	 * and then release the locks again.
 	 */
 
-	cpus_clear(mask);
-
 	while (!trace_empty(&iter)) {
 
 		if (!cnt)
@@ -3823,19 +3820,28 @@ __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
 	int i;
+	int ret = -ENOMEM;
 
-	/* TODO: make the number of buffers hot pluggable with CPUS */
-	tracing_buffer_mask = cpu_possible_map;
+	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+		goto out_free_buffer_mask;
 
+	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+	cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
 						   TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		return 0;
+		goto out_free_cpumask;
 	}
 	global_trace.entries = ring_buffer_size(global_trace.buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
 					     TRACE_BUFFER_FLAGS);
@@ -3843,7 +3849,7 @@ __init static int tracer_alloc_buffers(void)
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
-		return 0;
+		goto out_free_cpumask;
 	}
 	max_tr.entries = ring_buffer_size(max_tr.buffer);
 	WARN_ON(max_tr.entries != global_trace.entries);
@@ -3873,8 +3879,14 @@ __init static int tracer_alloc_buffers(void)
 				       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
+	ret = 0;
 
-	return 0;
+out_free_cpumask:
+	free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+	free_cpumask_var(tracing_buffer_mask);
+out:
+	return ret;
 }
 early_initcall(tracer_alloc_buffers);
 fs_initcall(tracer_init_debugfs);
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
 {
-	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
 
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = stack_trace_timer_fn;
@@ -208,14 +208,7 @@ static void start_stack_timer(int cpu)
 
 static void start_stack_timers(void)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		start_stack_timer(cpu);
-	}
-	set_cpus_allowed_ptr(current, &saved_mask);
+	on_each_cpu(start_stack_timer, NULL, 1);
 }
 
 static void stop_stack_timer(int cpu)
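
Point 3) of the commit message corresponds to the start_stack_timers() hunk just above: instead of temporarily rebinding the current task to each online CPU with set_cpus_allowed_ptr(), the per-CPU work is pushed to the CPUs with on_each_cpu(). A minimal sketch of that shape follows; demo_setup_cpu() and the demo_counter per-CPU variable are made-up stand-ins for start_stack_timer() and its hrtimer. The callback runs with interrupts disabled (IPI context on remote CPUs), so it must not sleep.

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_counter);

/* Runs once on every online CPU, with interrupts off: no sleeping here. */
static void demo_setup_cpu(void *unused)
{
	__get_cpu_var(demo_counter) = smp_processor_id();
}

static void demo_setup_all_cpus(void)
{
	/*
	 * Old pattern: save current->cpus_allowed, migrate the current task
	 * to each online CPU with set_cpus_allowed_ptr(), do the setup,
	 * then restore the saved mask.
	 * New pattern: a single call; the final argument (1) means wait for
	 * the callback to complete on all CPUs before returning.
	 */
	on_each_cpu(demo_setup_cpu, NULL, 1);
}
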