Commit c27569fc authored by Madalin Bucur, committed by David S. Miller

dpaa_eth: fix DMA mapping leak

On the error path, some fragments remain DMA mapped. Add a fix that
unmaps all the fragments, and rework the cleanup path to be simpler.

Fixes: 8151ee88 ("dpaa_eth: use page backed rx buffers")
Signed-off-by: Madalin Bucur <madalin.bucur@oss.nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ec34c015
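The fix follows a common error-path pattern: unmap each fragment as soon as the loop reaches it, and on failure clean up only what the main loop has not already handled. The plain C sketch below illustrates that bookkeeping outside the kernel; the frag/pool types, the unmap_frag()/attach_frag() helpers and the injected failure are hypothetical stand-ins for the DMA API and the DPAA per-cpu buffer-pool counters, not the driver's interfaces. On failure at entry i, the cleanup pass unmaps only entries j > i (0..i are already unmapped) and decrements counters only for j >= i (0..i-1 were already decremented).

#include <stdbool.h>
#include <stdio.h>

struct frag {
        bool mapped;    /* stand-in for a live DMA mapping */
        bool last;      /* stand-in for qm_sg_entry_is_final() */
};

struct pool {
        int count;      /* stand-in for the per-cpu bpool counter */
};

/* Hypothetical stand-in for dma_unmap_page(). */
static void unmap_frag(struct frag *f)
{
        f->mapped = false;
}

/* Pretend the skb-build step can fail; fail at entry 1 to hit the cleanup. */
static bool attach_frag(const struct frag *f, int idx)
{
        (void)f;
        return idx != 1;
}

static int build_frame(struct frag *sgt, int n, struct pool *pool)
{
        int i, j;

        for (i = 0; i < n; i++) {
                /* Unmap up front, as the patch does: the fragment is no
                 * longer mapped even if a later step bails out.
                 */
                unmap_frag(&sgt[i]);

                if (!attach_frag(&sgt[i], i))
                        goto free_buffers;

                pool->count--;          /* decremented for entries 0..i-1 */

                if (sgt[i].last)
                        break;
        }
        return 0;

free_buffers:
        for (j = 0; j < n; j++) {
                /* entries 0..i were already unmapped by the main loop */
                if (j > i)
                        unmap_frag(&sgt[j]);
                /* counters for 0..i-1 were already decremented */
                if (j >= i)
                        pool->count--;
                if (sgt[j].last)
                        break;
        }
        return -1;
}

int main(void)
{
        struct frag sgt[3] = {
                { .mapped = true },
                { .mapped = true },
                { .mapped = true, .last = true },
        };
        struct pool pool = { .count = 3 };

        build_frame(sgt, 3, &pool);
        /* All three pages leave the pool exactly once: count drops to 0. */
        printf("pool count after cleanup: %d\n", pool.count);
        return 0;
}

Compared with the removed "compensate sw bpool counter changes" loop, this keeps a single cleanup pass and never increments a counter only to decrement it again.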
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         int page_offset;
         unsigned int sz;
         int *count_ptr;
-        int i;
+        int i, j;
 
         vaddr = phys_to_virt(addr);
         WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
                                     SMP_CACHE_BYTES));
 
+                dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
                 /* We may use multiple Rx pools */
                 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                 if (!dpaa_bp)
                         goto free_buffers;
 
-                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                dma_unmap_page(priv->rx_dma_dev, sg_addr,
-                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                 if (!skb) {
                         sz = dpaa_bp->size +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                         skb_add_rx_frag(skb, i - 1, head_page, frag_off,
                                         frag_len, dpaa_bp->size);
                 }
+
                 /* Update the pool count for the current {cpu x bpool} */
+                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
                 (*count_ptr)--;
 
                 if (qm_sg_entry_is_final(&sgt[i]))
@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         return skb;
 
 free_buffers:
-        /* compensate sw bpool counter changes */
-        for (i--; i >= 0; i--) {
-                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-                if (dpaa_bp) {
-                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                        (*count_ptr)++;
-                }
-        }
         /* free all the SG entries */
-        for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
-                sg_addr = qm_sg_addr(&sgt[i]);
+        for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+                sg_addr = qm_sg_addr(&sgt[j]);
                 sg_vaddr = phys_to_virt(sg_addr);
+                /* all pages 0..i were unmapped */
+                if (j > i)
+                        dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+                                       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                 free_pages((unsigned long)sg_vaddr, 0);
-                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-                if (dpaa_bp) {
-                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                        (*count_ptr)--;
+                /* counters 0..i-1 were decremented */
+                if (j >= i) {
+                        dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+                        if (dpaa_bp) {
+                                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+                                (*count_ptr)--;
+                        }
                 }
-                if (qm_sg_entry_is_final(&sgt[i]))
+                if (qm_sg_entry_is_final(&sgt[j]))
                         break;
         }
 
         /* free the SGT fragment */