Commit 438ced17 authored by Vaibhav Nagarnaik, committed by Steven Rostedt

ring-buffer: Add per_cpu ring buffer control files

Add a debugfs entry called buffer_size_kb under the per_cpu/ folder for
each CPU, to control that CPU's ring buffer size independently.

If the global buffer_size_kb file is used to set the size, all
individual ring buffers are adjusted to the given size, and the file
reports this common size to maintain backward compatibility.

If the buffer_size_kb file under the per_cpu/ directory is used to
change the buffer size of a specific CPU, only that CPU's ring buffer
is resized. A subsequent read of tracing/buffer_size_kb reports 'X' to
indicate that the per-CPU ring buffer sizes are no longer equivalent.
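
A minimal userspace sketch of the resulting interface, for illustration
only: it assumes debugfs is mounted at /sys/kernel/debug, that cpu0
exists, that it runs as root, and the 2048 KB value is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Resize only cpu0's ring buffer to 2048 KB. */
	fd = open(TRACEFS "/per_cpu/cpu0/buffer_size_kb", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "2048", 4) < 0)
		perror("write cpu0 buffer_size_kb");
	close(fd);

	/* The global file reports "X" once the per-cpu sizes differ. */
	fd = open(TRACEFS "/buffer_size_kb", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("buffer_size_kb: %s", buf);
	}
	close(fd);
	return 0;
}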

Link: http://lkml.kernel.org/r/1328212844-11889-1-git-send-email-vnagarnaik@google.com

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Justin Teravest <teravest@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 5a26c8f0
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,9 +96,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
+#define RING_BUFFER_ALL_CPUS -1
+
 void ring_buffer_free(struct ring_buffer *buffer);
 
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
+int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
 
 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
 
@@ -129,7 +131,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
 
-unsigned long ring_buffer_size(struct ring_buffer *buffer);
+unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
 
 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_reset(struct ring_buffer *buffer);
[The diff for kernel/trace/ring_buffer.c is collapsed.]
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -838,7 +838,8 @@ __acquires(kernel_lock)
 
 	/* If we expanded the buffers, make sure the max is expanded too */
 	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size);
+		ring_buffer_resize(max_tr.buffer, trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -854,7 +855,8 @@ __acquires(kernel_lock)
 
 		/* Shrink the max buffer again */
 		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1);
+			ring_buffer_resize(max_tr.buffer, 1,
+						RING_BUFFER_ALL_CPUS);
 
 	printk(KERN_CONT "PASSED\n");
 }
@@ -3053,7 +3055,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 	return t->init(tr);
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size)
+static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+{
+	int cpu;
+	for_each_tracing_cpu(cpu)
+		tr->data[cpu]->entries = val;
+}
+
+static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
 
@@ -3064,19 +3073,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 	 */
 	ring_buffer_expanded = 1;
 
-	ret = ring_buffer_resize(global_trace.buffer, size);
+	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
 	if (!current_trace->use_max_tr)
 		goto out;
 
-	ret = ring_buffer_resize(max_tr.buffer, size);
+	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r;
+		int r = 0;
+
+		if (cpu == RING_BUFFER_ALL_CPUS) {
+			int i;
+			for_each_tracing_cpu(i) {
+				r = ring_buffer_resize(global_trace.buffer,
+						global_trace.data[i]->entries,
+						i);
+				if (r < 0)
+					break;
+			}
+		} else {
+			r = ring_buffer_resize(global_trace.buffer,
+					global_trace.data[cpu]->entries,
+					cpu);
+		}
 
-		r = ring_buffer_resize(global_trace.buffer,
-				       global_trace.entries);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3098,14 +3120,21 @@ static int __tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
-	max_tr.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&max_tr, size);
+	else
+		max_tr.data[cpu]->entries = size;
+
  out:
-	global_trace.entries = size;
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		set_buffer_entries(&global_trace, size);
+	else
+		global_trace.data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size)
+static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
 	int cpu, ret = size;
@@ -3121,12 +3150,19 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size)
 			atomic_inc(&max_tr.data[cpu]->disabled);
 	}
 
-	if (size != global_trace.entries)
-		ret = __tracing_resize_ring_buffer(size);
+	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+		/* make sure, this cpu is enabled in the mask */
+		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
 
+	ret = __tracing_resize_ring_buffer(size, cpu_id);
 	if (ret < 0)
 		ret = -ENOMEM;
 
+out:
 	for_each_tracing_cpu(cpu) {
 		if (global_trace.data[cpu])
 			atomic_dec(&global_trace.data[cpu]->disabled);
@@ -3157,7 +3193,8 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
@@ -3181,7 +3218,8 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size);
+		ret = __tracing_resize_ring_buffer(trace_buf_size,
+						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
 		ret = 0;
@@ -3207,8 +3245,8 @@ static int tracing_set_tracer(const char *buf)
 		 * The max_tr ring buffer has some state (e.g. ring->clock) and
 		 * we want preserve it.
 		 */
-		ring_buffer_resize(max_tr.buffer, 1);
-		max_tr.entries = 1;
+		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
+		set_buffer_entries(&max_tr, 1);
 	}
 	destroy_trace_option_files(topts);
@@ -3216,10 +3254,17 @@ static int tracing_set_tracer(const char *buf)
 	topts = create_trace_option_files(current_trace);
 
 	if (current_trace->use_max_tr) {
-		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
-		if (ret < 0)
-			goto out;
-		max_tr.entries = global_trace.entries;
+		int cpu;
+		/* we need to make per cpu buffer sizes equivalent */
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(max_tr.buffer,
+						global_trace.data[cpu]->entries,
+						cpu);
+			if (ret < 0)
+				goto out;
+			max_tr.data[cpu]->entries =
+					global_trace.data[cpu]->entries;
+		}
 	}
 
 	if (t->init) {
@@ -3721,30 +3766,82 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		goto out;
 	}
 
+struct ftrace_entries_info {
+	struct trace_array	*tr;
+	int			cpu;
+};
+
+static int tracing_entries_open(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->tr = &global_trace;
+	info->cpu = (unsigned long)inode->i_private;
+
+	filp->private_data = info;
+
+	return 0;
+}
+
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
-	char buf[96];
-	int r;
+	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_array *tr = info->tr;
+	char buf[64];
+	int r = 0;
+	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
-	if (!ring_buffer_expanded)
-		r = sprintf(buf, "%lu (expanded: %lu)\n",
-			    tr->entries >> 10,
-			    trace_buf_size >> 10);
-	else
-		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+
+	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+		int cpu, buf_size_same;
+		unsigned long size;
+
+		size = 0;
+		buf_size_same = 1;
+		/* check if all cpu sizes are same */
+		for_each_tracing_cpu(cpu) {
+			/* fill in the size from first enabled cpu */
+			if (size == 0)
+				size = tr->data[cpu]->entries;
+			if (size != tr->data[cpu]->entries) {
+				buf_size_same = 0;
+				break;
+			}
+		}
+
+		if (buf_size_same) {
+			if (!ring_buffer_expanded)
+				r = sprintf(buf, "%lu (expanded: %lu)\n",
+					    size >> 10,
+					    trace_buf_size >> 10);
+			else
+				r = sprintf(buf, "%lu\n", size >> 10);
+		} else
+			r = sprintf(buf, "X\n");
+	} else
+		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+
 	mutex_unlock(&trace_types_lock);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	return ret;
 }
 
 static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
+	struct ftrace_entries_info *info = filp->private_data;
 	unsigned long val;
 	int ret;
@@ -3759,7 +3856,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val);
+	ret = tracing_resize_ring_buffer(val, info->cpu);
 	if (ret < 0)
 		return ret;
@@ -3768,6 +3865,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int
+tracing_entries_release(struct inode *inode, struct file *filp)
+{
+	struct ftrace_entries_info *info = filp->private_data;
+
+	kfree(info);
+
+	return 0;
+}
+
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
 				size_t cnt, loff_t *ppos)
@@ -3779,7 +3886,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += tr->entries >> 10;
+		size += tr->data[cpu]->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -3813,7 +3920,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
 		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0);
+	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -4012,9 +4119,10 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_entries_open,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.release	= tracing_entries_release,
 	.llseek		= generic_file_llseek,
 };
@@ -4466,6 +4574,9 @@ static void tracing_init_debugfs_percpu(long cpu)
 	trace_create_file("stats", 0444, d_cpu,
 			(void *) cpu, &tracing_stats_fops);
+
+	trace_create_file("buffer_size_kb", 0444, d_cpu,
+			(void *) cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4795,7 +4906,7 @@ static __init int tracer_init_debugfs(void)
 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			&global_trace, &tracing_entries_fops);
+			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			&global_trace, &tracing_total_entries_fops);
@@ -5056,7 +5167,6 @@ __init static int tracer_alloc_buffers(void)
 		WARN_ON(1);
 		goto out_free_cpumask;
 	}
-	global_trace.entries = ring_buffer_size(global_trace.buffer);
 	if (global_trace.buffer_disabled)
 		tracing_off();
@@ -5069,7 +5179,6 @@ __init static int tracer_alloc_buffers(void)
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -5078,6 +5187,11 @@ __init static int tracer_alloc_buffers(void)
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
+	set_buffer_entries(&global_trace, ring_buf_size);
+#ifdef CONFIG_TRACER_MAX_TRACE
+	set_buffer_entries(&max_tr, 1);
+#endif
+
 	trace_init_cmdlines();
 	register_tracer(&nop_trace);
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -131,6 +131,7 @@ struct trace_array_cpu {
 	atomic_t		disabled;
 	void			*buffer_page;	/* ring buffer spare */
 
+	unsigned long		entries;
 	unsigned long		saved_latency;
 	unsigned long		critical_start;
 	unsigned long		critical_end;
@@ -152,7 +153,6 @@ struct trace_array_cpu {
  */
 struct trace_array {
 	struct ring_buffer	*buffer;
-	unsigned long		entries;
 	int			cpu;
 	int			buffer_disabled;
 	cycle_t			time_start;
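
To close the loop on the 'X' behavior described in the commit message,
here is a hedged userspace sketch of a reader that falls back to the
per-cpu files when the global buffer_size_kb reports 'X'. The helper
read_buffer_size_kb and the debugfs mount point are assumptions of this
sketch, not part of the commit.

#include <stdio.h>
#include <stdlib.h>

#define TRACEFS "/sys/kernel/debug/tracing"

/* Returns the size in KB, or -1 if unreadable or reported as "X". */
static long read_buffer_size_kb(int cpu)
{
	char path[128];
	char buf[64];
	long kb = -1;
	FILE *f;

	if (cpu < 0)
		snprintf(path, sizeof(path), "%s/buffer_size_kb", TRACEFS);
	else
		snprintf(path, sizeof(path),
			 "%s/per_cpu/cpu%d/buffer_size_kb", TRACEFS, cpu);

	f = fopen(path, "r");
	if (!f)
		return -1;
	/* strtol stops before a trailing " (expanded: ...)" suffix. */
	if (fgets(buf, sizeof(buf), f) && buf[0] != 'X')
		kb = strtol(buf, NULL, 10);
	fclose(f);
	return kb;
}

int main(void)
{
	long kb = read_buffer_size_kb(-1);

	if (kb < 0)
		printf("per-cpu sizes differ; cpu0 alone: %ld KB\n",
		       read_buffer_size_kb(0));
	else
		printf("all cpus: %ld KB\n", kb);
	return 0;
}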