perf evsel: Introduce config attr method

Factored out of the code in 'perf record', so that we can share option
parsing, etc. Eventually it will be used by 'perf top', but first 'trace'
will use it.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-hzjqsgnte1esk90ytq0ap98v@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent a8c9ae18
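
To make the intended reuse concrete, here is a minimal, purely illustrative sketch (not part of this commit) of how a caller such as 'trace' or 'top' might drive the new helpers, assuming the evlist already has its cpu and thread maps set up. The setup_events() helper and the option values below are hypothetical examples only.

/*
 * Illustrative only -- a hypothetical caller, not part of this commit.
 * Assumes the evlist was created with its cpu/thread maps in place
 * (perf_evlist__config_attrs() looks at evlist->cpus->map[0]).
 */
static int setup_events(struct perf_evlist *evlist)
{
	struct perf_record_opts opts = {
		.target_pid	     = -1,	  /* no pid/tid: disabled + enable_on_exec */
		.target_tid	     = -1,
		.freq		     = 1000,	  /* sample at 1 kHz */
		.user_freq	     = UINT_MAX,  /* i.e. not overridden by the user */
		.user_interval	     = ULLONG_MAX,
		.sample_id_all_avail = true,
	};

	/* Apply the options to every evsel on the list. */
	perf_evlist__config_attrs(evlist, &opts);

	/* Open the counters; 'false' means the events are not grouped. */
	return perf_evlist__open(evlist, false);
}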
@@ -185,4 +185,24 @@ extern const char perf_version_string[];
 
 void pthread__unblock_sigwinch(void);
 
+struct perf_record_opts {
+	pid_t	     target_pid;
+	pid_t	     target_tid;
+	bool	     call_graph;
+	bool	     inherit_stat;
+	bool	     no_delay;
+	bool	     no_inherit;
+	bool	     no_samples;
+	bool	     raw_samples;
+	bool	     sample_address;
+	bool	     sample_time;
+	bool	     sample_id_all_avail;
+	bool	     system_wide;
+	unsigned int freq;
+	unsigned int user_freq;
+	u64	     default_interval;
+	u64	     user_interval;
+	const char   *cpu_list;
+};
+
 #endif
@@ -46,6 +46,22 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
 	return evlist;
 }
 
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+			       struct perf_record_opts *opts)
+{
+	struct perf_evsel *evsel;
+
+	if (evlist->cpus->map[0] < 0)
+		opts->no_inherit = true;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		perf_evsel__config(evsel, opts);
+
+		if (evlist->nr_entries > 1)
+			evsel->attr.sample_type |= PERF_SAMPLE_ID;
+	}
+}
+
 static void perf_evlist__purge(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos, *n;
@@ -10,6 +10,7 @@
 struct pollfd;
 struct thread_map;
 struct cpu_map;
+struct perf_record_opts;
 
 #define PERF_EVLIST__HLIST_BITS 8
 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -64,6 +65,9 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist, bool group);
 
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+			       struct perf_record_opts *opts);
+
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -53,6 +53,76 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 	return evsel;
 }
 
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+{
+	struct perf_event_attr *attr = &evsel->attr;
+	int track = !evsel->idx; /* only the first counter needs these */
+
+	attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
+	attr->inherit	    = !opts->no_inherit;
+	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			      PERF_FORMAT_TOTAL_TIME_RUNNING |
+			      PERF_FORMAT_ID;
+
+	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+	/*
+	 * We default some events to a 1 default interval. But keep
+	 * it a weak assumption overridable by the user.
+	 */
+	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+				     opts->user_interval != ULLONG_MAX)) {
+		if (opts->freq) {
+			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			attr->freq	   = 1;
+			attr->sample_freq  = opts->freq;
+		} else {
+			attr->sample_period = opts->default_interval;
+		}
+	}
+
+	if (opts->no_samples)
+		attr->sample_freq = 0;
+
+	if (opts->inherit_stat)
+		attr->inherit_stat = 1;
+
+	if (opts->sample_address) {
+		attr->sample_type |= PERF_SAMPLE_ADDR;
+		attr->mmap_data = track;
+	}
+
+	if (opts->call_graph)
+		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+	if (opts->system_wide)
+		attr->sample_type |= PERF_SAMPLE_CPU;
+
+	if (opts->sample_id_all_avail &&
+	    (opts->sample_time || opts->system_wide ||
+	     !opts->no_inherit || opts->cpu_list))
+		attr->sample_type |= PERF_SAMPLE_TIME;
+
+	if (opts->raw_samples) {
+		attr->sample_type |= PERF_SAMPLE_TIME;
+		attr->sample_type |= PERF_SAMPLE_RAW;
+		attr->sample_type |= PERF_SAMPLE_CPU;
+	}
+
+	if (opts->no_delay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
+	attr->mmap = track;
+	attr->comm = track;
+
+	if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) {
+		attr->disabled = 1;
+		attr->enable_on_exec = 1;
+	}
+}
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	int cpu, thread;
@@ -67,6 +67,7 @@ struct perf_evsel {
 struct cpu_map;
 struct thread_map;
 struct perf_evlist;
+struct perf_record_opts;
 
 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__init(struct perf_evsel *evsel,
@@ -74,6 +75,9 @@ void perf_evsel__init(struct perf_evsel *evsel,
 void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
+void perf_evsel__config(struct perf_evsel *evsel,
+			struct perf_record_opts *opts);
+
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);