Commit 07940293 authored by Adrian Hunter, committed by Arnaldo Carvalho de Melo

perf callchain: Remove unnecessary validation

Now that the sample parsing correctly checks data sizes, there is no
reason for the check to be done again for callchains.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1377591794-30553-4-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 03b6ea9b
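
For context, the validation being removed only verified that the number of callchain
entries actually fit inside the event record. Below is a minimal, self-contained sketch,
not the actual perf code (the struct layout and the parse_callchain_checked() helper are
invented for illustration), of the kind of bounds check that sample parsing now performs
before touching the callchain, which is what makes a separate ip_callchain__valid() pass
redundant.

/*
 * Sketch only: check that "nr" callchain entries fit in the bytes that
 * remain in the record before reading them.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Simplified stand-in for the on-disk callchain layout (hypothetical). */
struct sample_callchain {
	u64 nr;
	u64 ips[];
};

/*
 * Return 0 and set *chain if the callchain fits inside the remaining
 * "size" bytes starting at "data"; return -1 otherwise.
 */
static int parse_callchain_checked(const void *data, size_t size,
				   const struct sample_callchain **chain)
{
	const struct sample_callchain *c = data;

	if (size < sizeof(u64))			/* room for the "nr" field itself? */
		return -1;
	size -= sizeof(u64);

	if (c->nr > size / sizeof(u64))		/* room for nr entries, overflow-safe */
		return -1;

	*chain = c;
	return 0;
}

int main(void)
{
	u64 buf[4] = { 3, 0x1000, 0x2000, 0x3000 };	/* nr = 3, then 3 entries */
	const struct sample_callchain *chain;

	/* Well-formed record: nr matches the bytes that follow. */
	if (!parse_callchain_checked(buf, sizeof(buf), &chain))
		printf("ok: %llu entries\n", (unsigned long long)chain->nr);

	/* Truncated record: claims 3 entries but only 2 u64s remain. */
	if (parse_callchain_checked(buf, 3 * sizeof(u64), &chain))
		printf("rejected truncated callchain\n");

	return 0;
}

The real parser works against the actual perf ABI layout; the point is only that the size
test now happens at parse time, so the pre-pass in perf_session__preprocess_sample() is no
longer needed, as the diff below shows.
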
@@ -21,14 +21,6 @@
 
 __thread struct callchain_cursor callchain_cursor;
 
-bool ip_callchain__valid(struct ip_callchain *chain,
-			 const union perf_event *event)
-{
-	unsigned int chain_size = event->header.size;
-	chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
-	return chain->nr * sizeof(u64) <= chain_size;
-}
-
 #define chain_for_each_child(child, parent)	\
 	list_for_each_entry(child, &parent->children, siblings)
 
@@ -109,11 +109,6 @@ int callchain_append(struct callchain_root *root,
 int callchain_merge(struct callchain_cursor *cursor,
 		    struct callchain_root *dst, struct callchain_root *src);
 
-struct ip_callchain;
-union perf_event;
-
-bool ip_callchain__valid(struct ip_callchain *chain,
-			 const union perf_event *event);
 /*
  * Initialize a cursor before adding entries inside, but keep
  * the previously allocated entries as a cache.
@@ -997,22 +997,6 @@ static int perf_session_deliver_event(struct perf_session *session,
 	}
 }
 
-static int perf_session__preprocess_sample(struct perf_session *session,
-					   union perf_event *event, struct perf_sample *sample)
-{
-	if (event->header.type != PERF_RECORD_SAMPLE ||
-	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
-		return 0;
-
-	if (!ip_callchain__valid(sample->callchain, event)) {
-		pr_debug("call-chain problem with event, skipping it.\n");
-		++session->stats.nr_invalid_chains;
-		session->stats.total_invalid_chains += sample->period;
-		return -EINVAL;
-	}
-
-	return 0;
-}
 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
 					     struct perf_tool *tool, u64 file_offset)
 {
@@ -1075,10 +1059,6 @@ static int perf_session__process_event(struct perf_session *session,
 	if (ret)
 		return ret;
 
-	/* Preprocess sample records - precheck callchains */
-	if (perf_session__preprocess_sample(session, event, &sample))
-		return 0;
-
 	if (tool->ordered_samples) {
 		ret = perf_session_queue_event(session, event, &sample,
 					       file_offset);