Commit 2311e06b, authored by Xuan Zhuo, committed by Jakub Kicinski

virtio_net: fix missing dma unmap for resize

For rq, we have three cases getting buffers from virtio core:

1. virtqueue_get_buf{,_ctx}
2. virtqueue_detach_unused_buf
3. callback for virtqueue_resize

But in commit 295525e2 ("virtio_net: merge dma operations when
filling mergeable buffers"), I missed the dma unmap for the #3 case.

That will leak some memory, because I did not release the pages referred
by the unused buffers.

If we do such script, we will make the system OOM.

    while true
    do
            ethtool -G ens4 rx 128
            ethtool -G ens4 rx 256
            free -m
    done

Fixes: 295525e2 ("virtio_net: merge dma operations when filling mergeable buffers")
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://lore.kernel.org/r/20231226094333.47740-1-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 01b2885d
...@@ -334,7 +334,6 @@ struct virtio_net_common_hdr { ...@@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
}; };
}; };
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static bool is_xdp_frame(void *ptr) static bool is_xdp_frame(void *ptr)
...@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) ...@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p; return p;
} }
/* Release an rx buffer back to the system, honoring the queue's
 * buffer scheme: big_packets mode chains whole pages on the rq via
 * give_pages(); mergeable and small-buffer modes each hold a single
 * reference on the buffer's head page, dropped with put_page().
 */
static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->big_packets && !vi->mergeable_rx_bufs)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}
static void enable_delayed_refill(struct virtnet_info *vi) static void enable_delayed_refill(struct virtnet_info *vi)
{ {
spin_lock_bh(&vi->refill_lock); spin_lock_bh(&vi->refill_lock);
...@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) ...@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
return buf; return buf;
} }
/* Detach one unused buffer from the rx virtqueue, undoing the
 * driver-side DMA mapping first when this queue premaps buffers.
 * Returns the buffer, or NULL when none remain.
 */
static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
{
	void *unused = virtqueue_detach_unused_buf(rq->vq);

	if (unused && rq->do_dma)
		virtnet_rq_unmap(rq, unused, 0);

	return unused;
}
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{ {
struct virtnet_rq_dma *dma; struct virtnet_rq_dma *dma;
...@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi) ...@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
} }
} }
/* Callback suitable for virtqueue_resize()/detach paths: undo the
 * driver-side DMA mapping for @buf (when this rq premaps buffers),
 * then free the buffer itself.
 */
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(vq)];

	if (rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{ {
unsigned int len; unsigned int len;
...@@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ...@@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) { if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len); pr_debug("%s: short packet %i\n", dev->name, len);
DEV_STATS_INC(dev, rx_length_errors); DEV_STATS_INC(dev, rx_length_errors);
virtnet_rq_free_unused_buf(rq->vq, buf); virtnet_rq_free_buf(vi, rq, buf);
return; return;
} }
...@@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi, ...@@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (running) if (running)
napi_disable(&rq->napi); napi_disable(&rq->napi);
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
if (err) if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
...@@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) ...@@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
xdp_return_frame(ptr_to_xdp(buf)); xdp_return_frame(ptr_to_xdp(buf));
} }
/* Free an unused rx buffer: big_packets mode returns page chains to
 * the queue with give_pages(); mergeable and small-buffer modes both
 * drop a single reference on the head page.
 */
static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	int qidx = vq2rxq(vq);

	if (vi->big_packets && !vi->mergeable_rx_bufs)
		give_pages(&vi->rq[qidx], buf);
	else
		put_page(virt_to_head_page(buf));
}
static void free_unused_bufs(struct virtnet_info *vi) static void free_unused_bufs(struct virtnet_info *vi)
{ {
void *buf; void *buf;
...@@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi) ...@@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
} }
for (i = 0; i < vi->max_queue_pairs; i++) { for (i = 0; i < vi->max_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i]; struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
virtnet_rq_free_unused_buf(rq->vq, buf); virtnet_rq_unmap_free_buf(vq, buf);
cond_resched(); cond_resched();
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment