Commit 0a4e38e6 authored by Alexander Shishkin's avatar Alexander Shishkin Committed by Ingo Molnar

perf: Support high-order allocations for AUX space

Some pmus (such as BTS or Intel PT without multiple-entry ToPA capability)
don't support scatter-gather and will prefer larger contiguous areas for
their output regions.

This patch adds a new pmu capability to request higher order allocations.
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1421237903-181015-4-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 45bfb2e5
...@@ -174,6 +174,7 @@ struct perf_event; ...@@ -174,6 +174,7 @@ struct perf_event;
*/ */
#define PERF_PMU_CAP_NO_INTERRUPT 0x01 #define PERF_PMU_CAP_NO_INTERRUPT 0x01
#define PERF_PMU_CAP_NO_NMI 0x02 #define PERF_PMU_CAP_NO_NMI 0x02
#define PERF_PMU_CAP_AUX_NO_SG 0x04
/** /**
* struct pmu - generic performance monitoring unit * struct pmu - generic performance monitoring unit
......
...@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) ...@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
spin_lock_init(&rb->event_lock); spin_lock_init(&rb->event_lock);
} }
#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
/*
 * Allocate a contiguous run of 2^order pages on @node for the AUX buffer,
 * falling back to progressively smaller orders (down to a single page) if
 * the larger allocation fails.  On a successful multi-page allocation the
 * compound page is split and the order is stashed in page_private() of the
 * first page so the driver can learn the size of the contiguous region.
 */
static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	/* Never ask the page allocator for more than it can give. */
	if (order > MAX_ORDER)
		order = MAX_ORDER;

	for (;;) {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
		if (page || !order)
			break;
		/* Retry with half the size until we're down to one page. */
		order--;
	}

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * split the compound page into individual pages and
		 * record the order in the head page's private field.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
/*
 * Free the AUX buffer page at index @idx, clearing the page_private()
 * order annotation and the mapping back-pointer before returning the
 * page to the allocator.
 */
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *pg = virt_to_page(rb->aux_pages[idx]);

	/* Drop the order annotation set by rb_alloc_aux_page(). */
	ClearPagePrivate(pg);
	pg->mapping = NULL;
	__free_page(pg);
}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, int flags) pgoff_t pgoff, int nr_pages, int flags)
{ {
bool overwrite = !(flags & RING_BUFFER_WRITABLE); bool overwrite = !(flags & RING_BUFFER_WRITABLE);
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu); int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
int ret = -ENOMEM; int ret = -ENOMEM, max_order = 0;
if (!has_aux(event)) if (!has_aux(event))
return -ENOTSUPP; return -ENOTSUPP;
if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
/*
* We need to start with the max_order that fits in nr_pages,
* not the other way around, hence ilog2() and not get_order.
*/
max_order = ilog2(nr_pages);
rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node); rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
if (!rb->aux_pages) if (!rb->aux_pages)
return -ENOMEM; return -ENOMEM;
rb->free_aux = event->pmu->free_aux; rb->free_aux = event->pmu->free_aux;
for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages; for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
rb->aux_nr_pages++) {
struct page *page; struct page *page;
int last, order;
page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
page = rb_alloc_aux_page(node, order);
if (!page) if (!page)
goto out; goto out;
rb->aux_pages[rb->aux_nr_pages] = page_address(page); for (last = rb->aux_nr_pages + (1 << page_private(page));
last > rb->aux_nr_pages; rb->aux_nr_pages++)
rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
} }
rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
...@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb) ...@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
} }
for (pg = 0; pg < rb->aux_nr_pages; pg++) for (pg = 0; pg < rb->aux_nr_pages; pg++)
free_page((unsigned long)rb->aux_pages[pg]); rb_free_aux_page(rb, pg);
kfree(rb->aux_pages); kfree(rb->aux_pages);
rb->aux_nr_pages = 0; rb->aux_nr_pages = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment