Commit d8f2835a authored by Xuan Zhuo's avatar Xuan Zhuo Committed by Jakub Kicinski

virtio_net: introduce receive_mergeable_xdp()

The purpose of this patch is to simplify the receive_mergeable().
Separate all the logic of XDP into a function.
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 4cb00b13
...@@ -1308,6 +1308,66 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, ...@@ -1308,6 +1308,66 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
return page_address(*page) + VIRTIO_XDP_HEADROOM; return page_address(*page) + VIRTIO_XDP_HEADROOM;
} }
/*
 * receive_mergeable_xdp() - run the attached XDP program on a mergeable
 * receive buffer chain (factored out of receive_mergeable()).
 *
 * @dev:      net device the buffer arrived on
 * @vi:       driver private state
 * @rq:       receive queue the buffer came from
 * @xdp_prog: XDP program to execute (caller holds the RCU read lock)
 * @buf:      head buffer, starting with the virtio_net_hdr_mrg_rxbuf
 * @ctx:      mergeable-buffer context cookie for this buffer
 * @len:      length of the head buffer
 * @xdp_xmit: out-param flags accumulated by virtnet_xdp_handler()
 * @stats:    per-queue rx statistics to update
 *
 * Return: an skb on XDP_PASS; NULL on XDP_TX/XDP_REDIRECT (the buffer is
 * presumably consumed by virtnet_xdp_handler() — confirm against that
 * helper) and NULL on any drop path (buffer pages released here).
 */
static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
					     struct virtnet_info *vi,
					     struct receive_queue *rq,
					     struct bpf_prog *xdp_prog,
					     void *buf,
					     void *ctx,
					     unsigned int len,
					     unsigned int *xdp_xmit,
					     struct virtnet_rq_stats *stats)
{
	/* The virtio-net header sits at the start of the head buffer. */
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	unsigned int xdp_frags_truesz = 0;
	struct sk_buff *head_skb;
	unsigned int frame_sz;
	struct xdp_buff xdp;
	void *data;
	u32 act;
	int err;

	/*
	 * Prepare an XDP-usable head buffer; may update num_buf/page/len
	 * (e.g. if the helper relocates/linearizes data — see
	 * mergeable_xdp_get_buf() for the exact contract).
	 */
	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
				     offset, &len, hdr);
	if (unlikely(!data))
		goto err_xdp;

	/* Assemble the (possibly multi-buffer) chain into one xdp_buff. */
	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
					 &num_buf, &xdp_frags_truesz, stats);
	if (unlikely(err))
		goto err_xdp;

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
		if (unlikely(!head_skb))
			break;	/* skb build failed: fall through to drop path */
		return head_skb;

	case XDP_TX:
	case XDP_REDIRECT:
		/* Buffer ownership passed on by the handler; nothing to free. */
		return NULL;

	default:
		break;	/* XDP_DROP/aborted: drop path below */
	}

	/* Release the fragments attached to the xdp_buff before the head page. */
	put_xdp_frags(&xdp);

err_xdp:
	put_page(page);
	/* Flush any remaining buffers of this chain still in the queue. */
	mergeable_buf_free(rq, num_buf, dev, stats);

	stats->xdp_drops++;
	stats->drops++;
	return NULL;
}
static struct sk_buff *receive_mergeable(struct net_device *dev, static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi, struct virtnet_info *vi,
struct receive_queue *rq, struct receive_queue *rq,
...@@ -1327,8 +1387,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, ...@@ -1327,8 +1387,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int headroom = mergeable_ctx_to_headroom(ctx); unsigned int headroom = mergeable_ctx_to_headroom(ctx);
unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
unsigned int frame_sz;
int err;
head_skb = NULL; head_skb = NULL;
stats->bytes += len - vi->hdr_len; stats->bytes += len - vi->hdr_len;
...@@ -1348,41 +1406,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, ...@@ -1348,41 +1406,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_lock(); rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog); xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) { if (xdp_prog) {
unsigned int xdp_frags_truesz = 0; head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
struct xdp_buff xdp; len, xdp_xmit, stats);
void *data; rcu_read_unlock();
u32 act; return head_skb;
data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
&num_buf, &page, offset, &len, hdr);
if (unlikely(!data))
goto err_xdp;
err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
&num_buf, &xdp_frags_truesz, stats);
if (unlikely(err))
goto err_xdp;
act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
switch (act) {
case XDP_PASS:
head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
if (unlikely(!head_skb))
goto err_xdp_frags;
rcu_read_unlock();
return head_skb;
case XDP_TX:
case XDP_REDIRECT:
rcu_read_unlock();
goto xdp_xmit;
default:
break;
}
err_xdp_frags:
put_xdp_frags(&xdp);
goto err_xdp;
} }
rcu_read_unlock(); rcu_read_unlock();
...@@ -1452,9 +1479,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, ...@@ -1452,9 +1479,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb; return head_skb;
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
err_skb: err_skb:
put_page(page); put_page(page);
mergeable_buf_free(rq, num_buf, dev, stats); mergeable_buf_free(rq, num_buf, dev, stats);
...@@ -1462,7 +1486,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, ...@@ -1462,7 +1486,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
err_buf: err_buf:
stats->drops++; stats->drops++;
dev_kfree_skb(head_skb); dev_kfree_skb(head_skb);
xdp_xmit:
return NULL; return NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment