Commit 8f0504a9 authored by Jesper Dangaard Brouer, committed by Alexei Starovoitov

bpf: cpumap do bulk allocation of SKBs

As cpumap now batch-consumes xdp_frame's from the ptr_ring, it knows how many
SKBs it needs to allocate. Thus, let's bulk allocate these SKBs via the
kmem_cache_alloc_bulk() API, using the previously introduced function
build_skb_around().

Notice that the flag __GFP_ZERO asks the slab/slub allocator to clear the
memory for us. This clears a larger area than strictly needed, but my micro
benchmarks on Intel CPUs show that it is slightly faster, because a
cacheline-aligned area is cleared for the SKBs. (For the SLUB allocator there
is future optimization potential, because the SKBs will with high probability
originate from the same page. If we can find/identify continuous memory areas,
then the Intel CPU memset rep stos will see a real performance gain.)
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent ba0509b6
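
For orientation before the diff: the flow is to batch-consume xdp_frame pointers from the ptr_ring, bulk-allocate the same number of zeroed SKB heads from skbuff_head_cache in a single kmem_cache_alloc_bulk() call, and then let build_skb_around() wrap each pre-allocated head around its frame's memory. The sketch below condenses that flow; the function name consume_and_build() and the batch size of 8 are illustrative only (stats, metadata and protocol handling are trimmed), while the slab, ptr_ring and SKB helpers are the ones the patch actually uses.

/* Illustrative sketch, not the kernel source: consume_and_build() and
 * SKETCH_BATCH are made up; the calls mirror what the patch adds to
 * cpu_map_kthread_run() and cpu_map_build_skb().
 */
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/xdp.h>

#define SKETCH_BATCH 8

static void consume_and_build(struct ptr_ring *queue)
{
        void *frames[SKETCH_BATCH];
        void *skbs[SKETCH_BATCH];
        gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;   /* zeroed SKB heads, atomic context */
        int i, n, m;

        /* Batch-consume xdp_frame pointers; n is how many SKBs are needed */
        n = ptr_ring_consume_batched(queue, frames, SKETCH_BATCH);

        /* One slab call allocates all n SKB heads */
        m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
        if (unlikely(m == 0)) {
                /* Total failure: NULL entries make the loop drop the frames */
                for (i = 0; i < n; i++)
                        skbs[i] = NULL;
        }

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                struct sk_buff *skb = skbs[i];
                unsigned int headroom, frame_size;

                if (!skb) {
                        xdp_return_frame(xdpf);
                        continue;
                }

                /* Wrap the pre-allocated head around the frame's own memory;
                 * build_skb_around() only returns NULL for a NULL skb.
                 */
                headroom = sizeof(*xdpf) + xdpf->headroom;
                frame_size = SKB_DATA_ALIGN(xdpf->len + headroom) +
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                skb = build_skb_around(skb, xdpf->data - headroom, frame_size);
                skb_reserve(skb, headroom);
                skb_put(skb, xdpf->len);
                /* ... hand skb to the stack, e.g. via netif_receive_skb() ... */
        }
}

If the bulk allocation fails outright, kmem_cache_alloc_bulk() returns 0, so every skbs[] slot is left NULL and the per-frame loop falls back to xdp_return_frame(); that is exactly what the "effect: xdp_return_frame" comment in the diff encodes.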
...@@ -160,12 +160,12 @@ static void cpu_map_kthread_stop(struct work_struct *work) ...@@ -160,12 +160,12 @@ static void cpu_map_kthread_stop(struct work_struct *work)
} }
static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
struct xdp_frame *xdpf) struct xdp_frame *xdpf,
struct sk_buff *skb)
{ {
unsigned int hard_start_headroom; unsigned int hard_start_headroom;
unsigned int frame_size; unsigned int frame_size;
void *pkt_data_start; void *pkt_data_start;
struct sk_buff *skb;
/* Part of headroom was reserved to xdpf */ /* Part of headroom was reserved to xdpf */
hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom; hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
...@@ -191,8 +191,8 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, ...@@ -191,8 +191,8 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
pkt_data_start = xdpf->data - hard_start_headroom; pkt_data_start = xdpf->data - hard_start_headroom;
skb = build_skb(pkt_data_start, frame_size); skb = build_skb_around(skb, pkt_data_start, frame_size);
if (!skb) if (unlikely(!skb))
return NULL; return NULL;
skb_reserve(skb, hard_start_headroom); skb_reserve(skb, hard_start_headroom);
...@@ -256,7 +256,9 @@ static int cpu_map_kthread_run(void *data) ...@@ -256,7 +256,9 @@ static int cpu_map_kthread_run(void *data)
while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
unsigned int drops = 0, sched = 0; unsigned int drops = 0, sched = 0;
void *frames[CPUMAP_BATCH]; void *frames[CPUMAP_BATCH];
int i, n; void *skbs[CPUMAP_BATCH];
gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
int i, n, m;
/* Release CPU reschedule checks */ /* Release CPU reschedule checks */
if (__ptr_ring_empty(rcpu->queue)) { if (__ptr_ring_empty(rcpu->queue)) {
...@@ -278,14 +280,20 @@ static int cpu_map_kthread_run(void *data) ...@@ -278,14 +280,20 @@ static int cpu_map_kthread_run(void *data)
* consume side valid as no-resize allowed of queue. * consume side valid as no-resize allowed of queue.
*/ */
n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
if (unlikely(m == 0)) {
for (i = 0; i < n; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */
drops = n;
}
local_bh_disable(); local_bh_disable();
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i]; struct xdp_frame *xdpf = frames[i];
struct sk_buff *skb; struct sk_buff *skb = skbs[i];
int ret; int ret;
skb = cpu_map_build_skb(rcpu, xdpf); skb = cpu_map_build_skb(rcpu, xdpf, skb);
if (!skb) { if (!skb) {
xdp_return_frame(xdpf); xdp_return_frame(xdpf);
continue; continue;
......