perf evlist: Drop redundant evsel->overwrite indicator

The evsel->overwrite indicator means an event should be put into an
overwritable ring buffer. In the current implementation it is always
equal to evsel->attr.write_backward. To reduce complexity, remove
evsel->overwrite and use evsel->attr.write_backward instead.
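
For reference, this attr bit is the only thing the kernel actually sees;
a minimal sketch (not part of this patch, using the raw perf_event_open(2)
syscall on a software dummy event) of requesting a backward-writing,
overwritable event looks like:

  #include <linux/perf_event.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int open_backward_dummy(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_SOFTWARE;
          attr.config = PERF_COUNT_SW_DUMMY;
          attr.sample_period = 1;
          attr.write_backward = 1; /* the only kernel-visible "overwrite" knob */

          /* monitor the current task on any CPU, no group leader, no flags */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }

A separate evsel->overwrite boolean therefore carries no information
beyond this single bit.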

In addition, in __perf_evsel__open(), if the kernel doesn't support
write_backward and the user explicitly set it on the evsel, don't fall
back as is done for other missing features: falling back to a forward
ring buffer is meaningless in this case, because we are unable to read
stably from a forward overwritable ring buffer.
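
Continuing the sketch above (again an illustration, not code from this
patch): on a kernel that predates write_backward the reserved attr bit is
rejected with EINVAL, and the caller has to give up rather than retry
with a forward buffer:

  #include <errno.h>
  #include <stdio.h>

  int main(void)
  {
          int fd = open_backward_dummy();

          if (fd < 0) {
                  if (errno == EINVAL)
                          /*
                           * Old kernel: no backward ring buffer support.
                           * Do not clear write_backward and retry; a forward
                           * overwritable ring buffer cannot be read stably.
                           */
                          fprintf(stderr, "Reading from overwrite event is not supported by this kernel.\n");
                  return 1;
          }

          /* ... mmap the buffer read-only and consume records backward ... */
          return 0;
  }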

Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-2-git-send-email-wangnan0@huawei.com
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -101,6 +101,7 @@ int test__backward_ring_buffer(int subtest __maybe_unused)
                 return TEST_FAIL;
         }
 
+        evlist->backward = true;
         err = perf_evlist__create_maps(evlist, &opts.target);
         if (err < 0) {
                 pr_debug("Not enough memory to create thread/cpu maps\n");
@@ -1003,7 +1003,7 @@ static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
                          struct perf_evsel *evsel)
 {
-        if (evsel->overwrite)
+        if (evsel->attr.write_backward)
                 return false;
         return true;
 }
@@ -1018,7 +1018,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
         evlist__for_each_entry(evlist, evsel) {
                 int fd;
 
-                if (evsel->overwrite != (evlist->overwrite && evlist->backward))
+                if (!!evsel->attr.write_backward != (evlist->overwrite && evlist->backward))
                         continue;
 
                 if (evsel->system_wide && thread)
@@ -1377,6 +1377,9 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
         int pid = -1, err;
         enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
+        if (perf_missing_features.write_backward && evsel->attr.write_backward)
+                return -EINVAL;
+
         if (evsel->system_wide)
                 nthreads = 1;
         else
@@ -1407,11 +1410,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
         if (perf_missing_features.lbr_flags)
                 evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                                      PERF_SAMPLE_BRANCH_NO_CYCLES);
-        if (perf_missing_features.write_backward) {
-                if (evsel->overwrite)
-                        return -EINVAL;
-                evsel->attr.write_backward = false;
-        }
 retry_sample_id:
         if (perf_missing_features.sample_id_all)
                 evsel->attr.sample_id_all = 0;
@@ -1513,7 +1511,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
          */
         if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
                 perf_missing_features.write_backward = true;
-                goto fallback_missing_features;
+                goto out_close;
         } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
                 perf_missing_features.clockid_wrong = true;
                 goto fallback_missing_features;
@@ -2422,7 +2420,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
         "We found oprofile daemon running, please stop it and try again.");
                 break;
         case EINVAL:
-                if (evsel->overwrite && perf_missing_features.write_backward)
+                if (evsel->attr.write_backward && perf_missing_features.write_backward)
                         return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
                 if (perf_missing_features.clockid)
                         return scnprintf(msg, size, "clockid feature not supported.");
@@ -114,7 +114,6 @@ struct perf_evsel {
         bool                    tracking;
         bool                    per_pkg;
         bool                    precise_max;
-        bool                    overwrite;
         /* parse modifier helper */
         int                     exclude_GH;
         int                     nr_members;