Commit 56a86f84 authored by Jason Wang, committed by David S. Miller

virtio-net: fix page miscount during XDP linearizing

We don't put the page during linearizing, which causes a leak when the
buffer is transmitted through XDP_TX or when the packet exceeds
PAGE_SIZE. Fix this by putting the page accordingly. Also decrement the
number of buffers during linearizing so that the caller can free the
remaining buffers correctly when the packet exceeds PAGE_SIZE. With
this patch, we no longer hit OOM after linearizing a huge number of
packets.
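
For context, a minimal, self-contained userspace analogue of the bug
(illustrative only; struct frag, frag_put() and linearize() are made-up
names standing in for pages, put_page() and xdp_linearize_page()):

/* Illustrative userspace analogue; "frag" stands in for a page-backed
 * receive buffer and frag_put() for put_page().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct frag {
	int refcnt;
	size_t len;
	char data[256];
};

static struct frag *frag_alloc(size_t len)
{
	struct frag *f = calloc(1, sizeof(*f));

	f->refcnt = 1;
	f->len = len;
	return f;
}

static void frag_put(struct frag *f)
{
	if (--f->refcnt == 0)
		free(f);
}

/* Copy queued fragments into one page-sized buffer, releasing each
 * fragment exactly once and decrementing *num as fragments are
 * consumed, the way the fixed xdp_linearize_page() does.
 */
static long linearize(struct frag **frags, int *num, char *page)
{
	size_t off = 0;

	while (*num > 0) {
		struct frag *f = frags[--*num];	/* pop one fragment */

		if (off + f->len > PAGE_SIZE) {
			frag_put(f);	/* missing pre-patch: leaked on oversize */
			return -1;
		}
		memcpy(page + off, f->data, f->len);
		off += f->len;
		frag_put(f);		/* missing pre-patch: leaked per fragment */
	}
	return (long)off;
}

int main(void)
{
	char page[PAGE_SIZE];
	struct frag *frags[2] = { frag_alloc(100), frag_alloc(200) };
	int num = 2;
	long copied = linearize(frags, &num, page);

	printf("copied %ld bytes, %d fragments left in queue\n", copied, num);
	return 0;
}

The two frag_put() calls mark the spots where the driver was missing
put_page(): once per fragment after its data is copied, and once on the
oversize bail-out. Passing num by pointer mirrors the u16 *num_buf
change below, so a failing caller knows how many fragments are still
queued.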

Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 275be061
...
@@ -483,7 +483,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
  * anymore.
  */
 static struct page *xdp_linearize_page(struct receive_queue *rq,
-				       u16 num_buf,
+				       u16 *num_buf,
 				       struct page *p,
 				       int offset,
 				       unsigned int *len)
...
@@ -497,7 +497,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
 	page_off += *len;
 
-	while (--num_buf) {
+	while (--*num_buf) {
 		unsigned int buflen;
 		unsigned long ctx;
 		void *buf;
...
@@ -507,19 +507,22 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 		if (unlikely(!ctx))
 			goto err_buf;
 
-		buf = mergeable_ctx_to_buf_address(ctx);
-		p = virt_to_head_page(buf);
-		off = buf - page_address(p);
-
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
-		if ((page_off + buflen) > PAGE_SIZE)
+		if ((page_off + buflen) > PAGE_SIZE) {
+			put_page(p);
 			goto err_buf;
+		}
+
+		buf = mergeable_ctx_to_buf_address(ctx);
+		p = virt_to_head_page(buf);
+		off = buf - page_address(p);
 
 		memcpy(page_address(page) + page_off,
 		       page_address(p) + off, buflen);
 		page_off += buflen;
+		put_page(p);
 	}
 
 	*len = page_off;
...
@@ -555,7 +558,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		/* This happens when rx buffer size is underestimated */
 		if (unlikely(num_buf > 1)) {
 			/* linearize data for XDP */
-			xdp_page = xdp_linearize_page(rq, num_buf,
+			xdp_page = xdp_linearize_page(rq, &num_buf,
 						      page, offset, &len);
 			if (!xdp_page)
 				goto err_xdp;
...
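
The u16 *num_buf change matters because receive_mergeable() cleans up
after a failure by walking the buffers still queued in the virtqueue.
A paraphrased sketch of that cleanup path (from the same file; not
verbatim, shown for context):

err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx))
			break;	/* fewer buffers than expected */
		put_page(virt_to_head_page(mergeable_ctx_to_buf_address(ctx)));
	}

Since xdp_linearize_page() already pops buffers off the virtqueue while
copying, it must decrement num_buf as it goes; otherwise this loop would
try to fetch buffers that were already consumed and miscount the pages
to release.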