Commit d40b4a15 authored by Jiri Olsa, committed by Arnaldo Carvalho de Melo

perf tools: Flush ordered events in case of allocation failure

In previous patches we added a limit for ordered events queue allocation
size. If we reach this size we need to flush (part of) the queue to get
some free buffers.

The current functionality is not affected, because the limit is hard
coded to (u64) -1. The configuration code for size will come in
following patches.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jean Pihet <jean.pihet@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-ggcas0xdq847fi85bz73do2e@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 8d99a6ce
...@@ -732,7 +732,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx, ...@@ -732,7 +732,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
return -1; return -1;
} }
err = perf_session_queue_event(kvm->session, event, &sample, 0); err = perf_session_queue_event(kvm->session, event, &kvm->tool, &sample, 0);
/* /*
* FIXME: Here we can't consume the event, as perf_session_queue_event will * FIXME: Here we can't consume the event, as perf_session_queue_event will
* point to it, and it'll get possibly overwritten by the kernel. * point to it, and it'll get possibly overwritten by the kernel.
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "util.h" #include "util.h"
#include "cpumap.h" #include "cpumap.h"
#include "perf_regs.h" #include "perf_regs.h"
#include "asm/bug.h"
static int perf_session__open(struct perf_session *session) static int perf_session__open(struct perf_session *session)
{ {
...@@ -456,6 +457,7 @@ struct ordered_event { ...@@ -456,6 +457,7 @@ struct ordered_event {
enum oe_flush { enum oe_flush {
OE_FLUSH__FINAL, OE_FLUSH__FINAL,
OE_FLUSH__ROUND, OE_FLUSH__ROUND,
OE_FLUSH__HALF,
}; };
static void perf_session_free_sample_buffers(struct perf_session *session) static void perf_session_free_sample_buffers(struct perf_session *session)
...@@ -637,6 +639,23 @@ static int ordered_events__flush(struct perf_session *s, struct perf_tool *tool, ...@@ -637,6 +639,23 @@ static int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
oe->next_flush = ULLONG_MAX; oe->next_flush = ULLONG_MAX;
break; break;
case OE_FLUSH__HALF:
{
struct ordered_event *first, *last;
struct list_head *head = &oe->events;
first = list_entry(head->next, struct ordered_event, list);
last = oe->last;
/* Warn if we are called before any event got allocated. */
if (WARN_ONCE(!last || list_empty(head), "empty queue"))
return 0;
oe->next_flush = first->timestamp;
oe->next_flush += (last->timestamp - first->timestamp) / 2;
break;
}
case OE_FLUSH__ROUND: case OE_FLUSH__ROUND:
default: default:
break; break;
...@@ -699,7 +718,8 @@ static int process_finished_round(struct perf_tool *tool, ...@@ -699,7 +718,8 @@ static int process_finished_round(struct perf_tool *tool,
} }
int perf_session_queue_event(struct perf_session *s, union perf_event *event, int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset) struct perf_tool *tool, struct perf_sample *sample,
u64 file_offset)
{ {
struct ordered_events *oe = &s->ordered_events; struct ordered_events *oe = &s->ordered_events;
u64 timestamp = sample->time; u64 timestamp = sample->time;
...@@ -714,6 +734,11 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event, ...@@ -714,6 +734,11 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
} }
new = ordered_events__new(oe, timestamp); new = ordered_events__new(oe, timestamp);
if (!new) {
ordered_events__flush(s, tool, OE_FLUSH__HALF);
new = ordered_events__new(oe, timestamp);
}
if (!new) if (!new)
return -ENOMEM; return -ENOMEM;
...@@ -1121,7 +1146,7 @@ static s64 perf_session__process_event(struct perf_session *session, ...@@ -1121,7 +1146,7 @@ static s64 perf_session__process_event(struct perf_session *session,
return ret; return ret;
if (tool->ordered_events) { if (tool->ordered_events) {
ret = perf_session_queue_event(session, event, &sample, ret = perf_session_queue_event(session, event, tool, &sample,
file_offset); file_offset);
if (ret != -ETIME) if (ret != -ETIME)
return ret; return ret;
......
...@@ -67,7 +67,8 @@ int perf_session__process_events(struct perf_session *session, ...@@ -67,7 +67,8 @@ int perf_session__process_events(struct perf_session *session,
struct perf_tool *tool); struct perf_tool *tool);
int perf_session_queue_event(struct perf_session *s, union perf_event *event, int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset); struct perf_tool *tool, struct perf_sample *sample,
u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool); void perf_tool__fill_defaults(struct perf_tool *tool);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment