Commit 1c194998 authored by Lorenzo Bianconi's avatar Lorenzo Bianconi Committed by Alexei Starovoitov

bpf: introduce frags support to bpf_prog_test_run_xdp()

Introduce the capability to allocate xdp frags in the
bpf_prog_test_run_xdp routine. This is a preliminary patch to
introduce the selftests for the new xdp frags eBPF helpers.
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/b7c0e425a9287f00f601c4fc0de54738ec6ceeea.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent be3d72a2
...@@ -876,16 +876,16 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ...@@ -876,16 +876,16 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr) union bpf_attr __user *uattr)
{ {
u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
u32 headroom = XDP_PACKET_HEADROOM;
u32 size = kattr->test.data_size_in; u32 size = kattr->test.data_size_in;
u32 headroom = XDP_PACKET_HEADROOM;
u32 retval, duration, max_data_sz;
u32 repeat = kattr->test.repeat; u32 repeat = kattr->test.repeat;
struct netdev_rx_queue *rxqueue; struct netdev_rx_queue *rxqueue;
struct skb_shared_info *sinfo;
struct xdp_buff xdp = {}; struct xdp_buff xdp = {};
u32 retval, duration; int i, ret = -EINVAL;
struct xdp_md *ctx; struct xdp_md *ctx;
u32 max_data_sz;
void *data; void *data;
int ret = -EINVAL;
if (prog->expected_attach_type == BPF_XDP_DEVMAP || if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
prog->expected_attach_type == BPF_XDP_CPUMAP) prog->expected_attach_type == BPF_XDP_CPUMAP)
...@@ -905,27 +905,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ...@@ -905,27 +905,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
headroom -= ctx->data; headroom -= ctx->data;
} }
/* XDP have extra tailroom as (most) drivers use full page */
max_data_sz = 4096 - headroom - tailroom; max_data_sz = 4096 - headroom - tailroom;
size = min_t(u32, size, max_data_sz);
data = bpf_test_init(kattr, kattr->test.data_size_in, data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
max_data_sz, headroom, tailroom);
if (IS_ERR(data)) { if (IS_ERR(data)) {
ret = PTR_ERR(data); ret = PTR_ERR(data);
goto free_ctx; goto free_ctx;
} }
rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
xdp_init_buff(&xdp, headroom + max_data_sz + tailroom, rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
&rxqueue->xdp_rxq); xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
xdp_prepare_buff(&xdp, data, headroom, size, true); xdp_prepare_buff(&xdp, data, headroom, size, true);
sinfo = xdp_get_shared_info_from_buff(&xdp);
ret = xdp_convert_md_to_buff(ctx, &xdp); ret = xdp_convert_md_to_buff(ctx, &xdp);
if (ret) if (ret)
goto free_data; goto free_data;
if (unlikely(kattr->test.data_size_in > size)) {
void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
while (size < kattr->test.data_size_in) {
struct page *page;
skb_frag_t *frag;
int data_len;
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto out;
}
frag = &sinfo->frags[sinfo->nr_frags++];
__skb_frag_set_page(frag, page);
data_len = min_t(int, kattr->test.data_size_in - size,
PAGE_SIZE);
skb_frag_size_set(frag, data_len);
if (copy_from_user(page_address(page), data_in + size,
data_len)) {
ret = -EFAULT;
goto out;
}
sinfo->xdp_frags_size += data_len;
size += data_len;
}
xdp_buff_set_frags_flag(&xdp);
}
if (repeat > 1) if (repeat > 1)
bpf_prog_change_xdp(NULL, prog); bpf_prog_change_xdp(NULL, prog);
ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
/* We convert the xdp_buff back to an xdp_md before checking the return /* We convert the xdp_buff back to an xdp_md before checking the return
* code so the reference count of any held netdevice will be decremented * code so the reference count of any held netdevice will be decremented
...@@ -935,10 +968,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ...@@ -935,10 +968,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (ret) if (ret)
goto out; goto out;
if (xdp.data_meta != data + headroom || size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
xdp.data_end != xdp.data_meta + size)
size = xdp.data_end - xdp.data_meta;
ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval, ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
duration); duration);
if (!ret) if (!ret)
...@@ -949,6 +979,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ...@@ -949,6 +979,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (repeat > 1) if (repeat > 1)
bpf_prog_change_xdp(prog, NULL); bpf_prog_change_xdp(prog, NULL);
free_data: free_data:
for (i = 0; i < sinfo->nr_frags; i++)
__free_page(skb_frag_page(&sinfo->frags[i]));
kfree(data); kfree(data);
free_ctx: free_ctx:
kfree(ctx); kfree(ctx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment