Commit c41fcce9 authored by Shreyas Bhatewara, committed by David S. Miller

vmxnet3: Fix memory leaks in rx path (fwd)

If the rcd length was zero, the page used for the frag was not being released;
it was simply replaced with a newly allocated page. This change takes care of
that memory leak.
Signed-off-by: Guolin Yang <gyang@vmware.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e9ba47bf
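The leak lives in the receive-completion path: the driver used to allocate a replacement page and install it in the rx ring unconditionally, while the old page was handed to the skb only when rcd->len was non-zero, so a zero-length completion left the old page unreferenced. Below is a condensed sketch of the flow that the second and third hunks arrive at, reconstructed from those hunks with the allocation-failure branch abbreviated to a comment; it is an illustration, not the verbatim driver code.

	if (rcd->len) {
		/* The frag carries data: get a replacement page first. If
		 * allocation fails, the old page stays in the ring and the
		 * packet is dropped (see the error path in the hunks below). */
		new_page = alloc_page(GFP_ATOMIC);

		/* Hand the filled page over to the skb ... */
		dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr,
			       rbi->len, PCI_DMA_FROMDEVICE);
		vmxnet3_append_frag(ctx->skb, rcd, rbi);

		/* ... and immediately refill the ring slot with the new page. */
		rbi->page = new_page;
		rbi->dma_addr = dma_map_page(&adapter->pdev->dev, rbi->page,
					     0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		rxd->addr = cpu_to_le64(rbi->dma_addr);
		rxd->len = rbi->len;
	}
	/* rcd->len == 0: the page stays mapped in the ring and is reused
	 * as-is; previously it was replaced and the old page leaked. */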
@@ -861,6 +861,9 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 				      , skb_headlen(skb));
 		}
 
+		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+			ctx->copy_size = skb->len;
+
 		/* make sure headers are accessible directly */
 		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 			goto err;
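The hunk above is a small tx-side adjustment in vmxnet3_parse_and_copy_hdr(): when the whole frame is no larger than VMXNET3_HDR_COPY_SIZE, copy_size is widened to the full packet length, so the pskb_may_pull() that follows operates on the entire frame rather than only the parsed headers. A minimal sketch of that decision, assuming the earlier header parsing has already set ctx->copy_size:

	/* ctx->copy_size was set from the parsed headers above (assumed);
	 * for frames that fit entirely in the copy area, cover everything. */
	if (skb->len <= VMXNET3_HDR_COPY_SIZE)
		ctx->copy_size = skb->len;

	/* make sure headers are accessible directly */
	if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
		goto err;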
@@ -1273,13 +1276,14 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			if (skip_page_frags)
 				goto rcd_done;
 
+			if (rcd->len) {
 			new_page = alloc_page(GFP_ATOMIC);
-			if (unlikely(new_page == NULL)) {
 			/* Replacement page frag could not be allocated.
 			 * Reuse this page. Drop the pkt and free the
 			 * skb which contained this page as a frag. Skip
 			 * processing all the following non-sop frags.
 			 */
+			if (unlikely(!new_page)) {
 			rq->stats.rx_buf_alloc_failure++;
 			dev_kfree_skb(ctx->skb);
 			ctx->skb = NULL;
@@ -1287,23 +1291,22 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				goto rcd_done;
 			}
 
-			if (rcd->len) {
 			dma_unmap_page(&adapter->pdev->dev,
 				       rbi->dma_addr, rbi->len,
 				       PCI_DMA_FROMDEVICE);
 
 			vmxnet3_append_frag(ctx->skb, rcd, rbi);
-			}
 
 			/* Immediate refill */
 			rbi->page = new_page;
-			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
-						     rbi->page,
+			rbi->dma_addr = dma_map_page(&adapter->pdev->dev
+						, rbi->page,
 						     0, PAGE_SIZE,
 						     PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 		}
+		}
 
 		skb = ctx->skb;
...