Commit d2852b93
authored Jan 08, 2009 by Robert Richter
Merge branch 'oprofile/ring_buffer' into oprofile/oprofile-for-tip

parents 4a6908a3 14f0ca8e
Showing 13 changed files with 566 additions and 465 deletions
arch/Kconfig                       +2   -0
arch/x86/oprofile/op_model_amd.c   +91  -133
drivers/oprofile/buffer_sync.c     +81  -148
drivers/oprofile/cpu_buffer.c      +240 -153
drivers/oprofile/cpu_buffer.h      +62  -10
drivers/oprofile/event_buffer.c    +2   -2
drivers/oprofile/oprof.c           +2   -2
drivers/oprofile/oprof.h           +4   -4
drivers/oprofile/oprofile_files.c  +17  -7
include/linux/oprofile.h           +19  -2
include/linux/ring_buffer.h        +2   -0
kernel/trace/ring_buffer.c         +42  -2
kernel/trace/trace.c               +2   -2
arch/Kconfig
@@ -6,6 +6,8 @@ config OPROFILE
 	tristate "OProfile system profiling (EXPERIMENTAL)"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	select TRACING
+	select RING_BUFFER
 	help
 	  OProfile is a profiling system capable of profiling the
 	  whole system, include the kernel, kernel modules, libraries,
arch/x86/oprofile/op_model_amd.c (+91 -133)
(diff collapsed in this view)
drivers/oprofile/buffer_sync.c
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -268,18 +269,6 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 	return cookie;
 }
 
-static void increment_tail(struct oprofile_cpu_buffer *b)
-{
-	unsigned long new_tail = b->tail_pos + 1;
-
-	rmb(); /* be sure fifo pointers are synchromized */
-
-	if (new_tail < b->buffer_size)
-		b->tail_pos = new_tail;
-	else
-		b->tail_pos = 0;
-}
-
 static unsigned long last_cookie = INVALID_COOKIE;
 
 static void add_cpu_switch(int i)
@@ -327,84 +316,73 @@ static void add_trace_begin(void)
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-#define IBS_EIP(offset)				\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
-#define IBS_EVENT(offset)			\
-	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
-			  struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
 
-	increment_tail(cpu_buf);	/* move to RIP entry */
-
-	rip = IBS_EIP(cpu_buf->tail_pos);
-
-#ifdef __LP64__
-	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		increment_tail(cpu_buf);
-		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
-		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
-	}
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -423,25 +401,6 @@ static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 }
 
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -466,33 +425,6 @@ static inline int is_code(unsigned long val)
 }
 
-/* "acquire" as many cpu buffer slots as we can */
-static unsigned long get_slots(struct oprofile_cpu_buffer *b)
-{
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
-
-	/*
-	 * Subtle. This resets the persistent last_task
-	 * and in_kernel values used for switching notes.
-	 * BUT, there is a small window between reading
-	 * head_pos, and this call, that means samples
-	 * can appear at the new head position, but not
-	 * be prefixed with the notes for switching
-	 * kernel mode or a task switch. This small hole
-	 * can lead to mis-attribution or samples where
-	 * we don't know if it's in the kernel or not,
-	 * at the start of an event buffer.
-	 */
-	cpu_buffer_reset(b);
-
-	if (head >= tail)
-		return head - tail;
-
-	return head + (b->buffer_size - tail);
-}
-
 /* Move tasks along towards death. Any tasks on dead_tasks
  * will definitely have no remaining references in any
  * CPU buffers at this point, because we use two lists,
@@ -559,71 +491,72 @@ typedef enum {
  */
 void sync_buffer(int cpu)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 	struct mm_struct *mm = NULL;
+	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
-#ifndef CONFIG_OPROFILE_IBS
 	unsigned int i;
 	unsigned long available;
-#endif
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	/* Remember, only we can modify tail_pos */
-
-#ifndef CONFIG_OPROFILE_IBS
-	available = get_slots(cpu_buf);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-#else
-	while (get_slots(cpu_buf)) {
-#endif
-		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
+			break;
 
-		if (is_code(s->eip)) {
-			if (s->event <= CPU_IS_KERNEL) {
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
+				state = sb_bt_start;
+				add_trace_begin();
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
 				/* kernel/userspace switch */
-				in_kernel = s->event;
+				in_kernel = flags & IS_KERNEL;
 				if (state == sb_buffer_start)
 					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-			} else if (s->event == CPU_TRACE_BEGIN) {
-				state = sb_bt_start;
-				add_trace_begin();
-#ifdef CONFIG_OPROFILE_IBS
-			} else if (s->event == IBS_FETCH_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
-			} else if (s->event == IBS_OP_BEGIN) {
-				state = sb_bt_start;
-				add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
-#endif
-			} else {
-				struct mm_struct *oldmm = mm;
-
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
 				/* userspace context switch */
-				new = (struct task_struct *)s->event;
-
+				new = (struct task_struct *)val;
+				oldmm = mm;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
 		}
 
-		increment_tail(cpu_buf);
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+		}
 	}
 	release_mm(mm);
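The rewritten sync_buffer() replaces the old exclusive event codes (CPU_IS_KERNEL, CPU_TRACE_BEGIN, IBS_*_BEGIN) with independently combinable flag bits, so a single marker sample can announce a backtrace start, a kernel/user transition, and a task switch at once, with any extra words retrieved through op_cpu_buffer_get_data(). A hedged sketch of the matching writer-side encoding; the real writer lives in the collapsed cpu_buffer.c diff and the condition names below are invented:

	/* Sketch only: how a marker sample could be encoded with the new
	 * flag bits; the actual writer is in the collapsed cpu_buffer.c. */
	unsigned long flags = 0;

	if (backtrace_starting)			/* hypothetical condition */
		flags |= TRACE_BEGIN;
	if (kernel_mode_changed)		/* hypothetical condition */
		flags |= KERNEL_CTX_SWITCH | (is_kernel ? IS_KERNEL : 0);
	if (task_changed)			/* hypothetical condition */
		flags |= USER_CTX_SWITCH;	/* task ptr follows as data */

	sample->eip = ESCAPE_CODE;		/* what is_code() tests for */
	sample->event = flags;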
drivers/oprofile/cpu_buffer.c (+240 -153)
(diff collapsed in this view)
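Since that diff is collapsed, the following is only a plausible sketch of how the reserve/commit pair declared in cpu_buffer.h could sit on top of the ring-buffer calls exported elsewhere in this merge. The buffer name op_ring_buffer_write is an assumption, not the merged code:

	/* Plausible sketch, not the merged code: back the oprofile entry
	 * API with the ring buffer.  'op_ring_buffer_write' is assumed. */
	struct op_sample
	*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
	{
		entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
			sizeof(struct op_sample)
				+ size * sizeof(entry->sample->data[0]),
			&entry->irq_flags);
		if (!entry->event)
			return NULL;
		entry->sample = ring_buffer_event_data(entry->event);
		entry->size = size;
		entry->data = entry->sample->data;
		return entry->sample;
	}

	int op_cpu_buffer_write_commit(struct op_entry *entry)
	{
		return ring_buffer_unlock_commit(op_ring_buffer_write,
						 entry->event,
						 entry->irq_flags);
	}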
drivers/oprofile/cpu_buffer.h
 /**
  * @file cpu_buffer.h
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -15,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/cache.h>
 #include <linux/sched.h>
+#include <linux/ring_buffer.h>
 
 struct task_struct;
@@ -30,16 +32,16 @@ void end_cpu_work(void);
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
+struct op_entry;
+
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -50,12 +52,62 @@ struct oprofile_cpu_buffer {
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
+/*
+ * Resets the cpu buffer to a sane state.
+ *
+ * reset these to invalid values; the next sample collected will
+ * populate the buffer with proper values to initialize the buffer
+ */
+static inline void op_cpu_buffer_reset(int cpu)
+{
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+
+	cpu_buf->last_is_kernel = -1;
+	cpu_buf->last_task = NULL;
+}
+
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
+{
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
+}
+
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
+{
+	return entry->size;
+}
+
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
+{
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
+}
 
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN 4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
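The three inline accessors implement a simple cursor over an entry's variable-length payload: add_data advances the write cursor, get_data the read cursor, and get_size reports what remains. A minimal usage sketch; pc, process() and the payload values are made up:

	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;

	/* writer: reserve room for two payload words and fill them */
	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (sample) {
		sample->eip = pc;			/* pc: made-up address */
		sample->event = 0;
		op_cpu_buffer_add_data(&entry, 0xdead);	/* size 2 -> 1 */
		op_cpu_buffer_add_data(&entry, 0xbeef);	/* size 1 -> 0 */
		op_cpu_buffer_write_commit(&entry);
	}

	/* reader: op_cpu_buffer_read_entry() refills entry, then drain it */
	while (op_cpu_buffer_get_data(&entry, &val))
		process(val);				/* process(): hypothetical */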
drivers/oprofile/event_buffer.c
@@ -73,8 +73,8 @@ int alloc_event_buffer(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
drivers/oprofile/oprof.c
@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
@@ -172,7 +172,7 @@ int oprofile_set_backtrace(unsigned long val)
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);
drivers/oprofile/oprof.h
@@ -21,12 +21,12 @@ void oprofile_stop(void);
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;
drivers/oprofile/oprofile_files.c
@@ -14,13 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-unsigned long fs_buffer_size = 131072;
-unsigned long fs_cpu_buffer_size = 8192;
-unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
@@ -120,12 +125,17 @@ static const struct file_operations dump_fops = {
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
+	/* reinitialize default values */
+	oprofile_buffer_size =		BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size =	CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed =	BUFFER_WATERSHED_DEFAULT;
+
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
include/linux/oprofile.h
@@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
 void oprofile_arch_exit(void);
 
 /**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
+ * Add a sample. This may be called from any context.
  */
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
@@ -165,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
 
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
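oprofile_write_reserve()/oprofile_add_data()/oprofile_write_commit() are the arch-facing wrappers around the per-cpu entry API; op_model_amd.c (diff collapsed above) becomes their first caller. A hedged sketch of the calling pattern, with invented code and size constants:

	/* Sketch only: MY_IBS_CODE and MY_IBS_SIZE are invented placeholders
	 * standing in for the driver's real escape code and payload size. */
	#define MY_IBS_CODE	13
	#define MY_IBS_SIZE	2

	static void log_ibs_sample(struct pt_regs * const regs, unsigned long pc,
				   unsigned long msr_lo, unsigned long msr_hi)
	{
		struct op_entry entry;

		oprofile_write_reserve(&entry, regs, pc, MY_IBS_CODE, MY_IBS_SIZE);
		oprofile_add_data(&entry, msr_lo);
		oprofile_add_data(&entry, msr_hi);
		oprofile_write_commit(&entry);
	}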
include/linux/ring_buffer.h
@@ -116,6 +116,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
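The two new per-cpu prototypes let a client size a per-cpu read loop without draining the buffer. A sketch of how this merge's op_cpu_buffer_entries() could plausibly be built on them, assuming oprofile keeps separate read and write ring buffers named op_ring_buffer_read and op_ring_buffer_write (assumed names, since cpu_buffer.c is collapsed above):

	/* Sketch under assumed buffer names: pending entries for one cpu */
	unsigned long op_cpu_buffer_entries(int cpu)
	{
		return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
			+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
	}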
kernel/trace/ring_buffer.c
@@ -31,6 +31,7 @@ void tracing_on(void)
 {
 	ring_buffers_off = 0;
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
 {
 	ring_buffers_off = 1;
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -113,8 +117,15 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -136,6 +147,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
 	for_each_cpu_mask(cpu, buffer->cpumask)
@@ -381,7 +393,7 @@ extern int ring_buffer_page_too_big(void);
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +456,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -459,6 +472,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
@@ -620,6 +634,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1220,6 +1235,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	preempt_enable_notrace();
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1269,6 +1285,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1351,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1360,6 +1378,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1391,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1413,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1433,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1450,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1467,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1490,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,6 +1513,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 /**
  * ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1539,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1554,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1825,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 /**
  * ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1896,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_consume - return an event and consume it
@@ -1894,6 +1924,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1934,6 +1965,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +1982,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +2004,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2014,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2057,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2070,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2089,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2106,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2156,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
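Besides the EXPORT_SYMBOL_GPL additions that make the API usable from a modular oprofile, the one behavioral change here is ring_buffer_event_length(): for data events it now returns the usable payload size instead of the raw on-ring length (header plus padding). A small illustration of why a consumer cares; buffer and flags are assumed to already exist in the caller:

	/* With a 16-byte reservation, rb_event_length() is 20 (payload plus
	 * the 4-byte event header), which is what this function used to
	 * return. */
	struct ring_buffer_event *event;
	void *body;

	event = ring_buffer_lock_reserve(buffer, 16, &flags);
	if (!event)
		return;
	body = ring_buffer_event_data(event);
	/* After the fix this memset covers exactly the 16 payload bytes;
	 * the old header-inclusive value would have overrun by 4 bytes. */
	memset(body, 0, ring_buffer_event_length(event));
	ring_buffer_unlock_commit(buffer, event, flags);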
kernel/trace/trace.c
@@ -914,7 +914,7 @@ enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 
 	return iter->ent ? iter : NULL;
 }