Commit 5db48105 authored by Xuan Zhuo, committed by Jakub Kicinski

virtio_net: separate receive_mergeable

This commit separates the function receive_mergeable(),
moving the logic of appending a frag to the skb into an independent function.
The subsequent commit will reuse it.
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://patch.msgid.link/20240708112537.96291-6-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c86c120f
@@ -1821,6 +1821,49 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
 	return NULL;
 }
 
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+					       struct sk_buff *curr_skb,
+					       struct page *page, void *buf,
+					       int len, int truesize)
+{
+	int num_skb_frags;
+	int offset;
+
+	num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+	if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+		struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+		if (unlikely(!nskb))
+			return NULL;
+
+		if (curr_skb == head_skb)
+			skb_shinfo(curr_skb)->frag_list = nskb;
+		else
+			curr_skb->next = nskb;
+		curr_skb = nskb;
+		head_skb->truesize += nskb->truesize;
+		num_skb_frags = 0;
+	}
+
+	if (curr_skb != head_skb) {
+		head_skb->data_len += len;
+		head_skb->len += len;
+		head_skb->truesize += truesize;
+	}
+
+	offset = buf - page_address(page);
+	if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+		put_page(page);
+		skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+				     len, truesize);
+	} else {
+		skb_add_rx_frag(curr_skb, num_skb_frags, page,
+				offset, len, truesize);
+	}
+
+	return curr_skb;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
@@ -1870,8 +1913,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	if (unlikely(!curr_skb))
 		goto err_skb;
 	while (--num_buf) {
-		int num_skb_frags;
-
 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
@@ -1896,34 +1937,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			goto err_skb;
 		}
 
-		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
-		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
-			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
-			if (unlikely(!nskb))
-				goto err_skb;
-			if (curr_skb == head_skb)
-				skb_shinfo(curr_skb)->frag_list = nskb;
-			else
-				curr_skb->next = nskb;
-			curr_skb = nskb;
-			head_skb->truesize += nskb->truesize;
-			num_skb_frags = 0;
-		}
-		if (curr_skb != head_skb) {
-			head_skb->data_len += len;
-			head_skb->len += len;
-			head_skb->truesize += truesize;
-		}
-		offset = buf - page_address(page);
-		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
-			put_page(page);
-			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-					     len, truesize);
-		} else {
-			skb_add_rx_frag(curr_skb, num_skb_frags, page,
-					offset, len, truesize);
-		}
+		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+						   buf, len, truesize);
+		if (!curr_skb)
+			goto err_skb;
 	}
 
 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
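
Editor's note: the sketch below illustrates the calling pattern the new helper expects, mirroring the updated receive_mergeable() loop above. It is not part of this patch; everything except virtnet_skb_append_frag() and the core sk_buff APIs (rx_buf_desc, append_rx_buffers) is a hypothetical placeholder.

/* Illustrative sketch (not part of this patch): appending each extra
 * receive buffer to a head skb via the new helper. The helper adds the
 * buffer's page as a fragment of curr_skb, coalescing with the previous
 * fragment when the buffer is contiguous with it, and chains a fresh skb
 * onto head_skb's frag_list (or onto the previously chained skb) once
 * MAX_SKB_FRAGS is reached. It returns the skb the next buffer should be
 * appended to, or NULL if that allocation fails.
 */
struct rx_buf_desc {			/* hypothetical descriptor */
	struct page *page;
	void *addr;
	int len;
	int truesize;
};

static struct sk_buff *append_rx_buffers(struct sk_buff *head_skb,
					 struct rx_buf_desc *bufs,
					 int num_buf)
{
	struct sk_buff *curr_skb = head_skb;
	int i;

	for (i = 0; i < num_buf; i++) {
		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb,
						   bufs[i].page, bufs[i].addr,
						   bufs[i].len, bufs[i].truesize);
		if (!curr_skb)
			return NULL;	/* caller must drop the remaining buffers */
	}

	return head_skb;
}

Passing head_skb and curr_skb separately lets the helper account chained bytes (len, data_len, truesize) on the head skb while appending fragments to the tail of the frag_list chain, so the caller only has to track which skb to append to next.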