Commit 8151ee88 authored by Madalin Bucur, committed by David S. Miller

dpaa_eth: use page backed rx buffers

Change the buffers used for reception from netdev_frags to pages.
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f07f3004
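
Before the diff, a minimal sketch (reviewer annotation, not part of the commit) of what the change amounts to: the Rx path used to carve buffers out of page fragments with netdev_alloc_frag(), realign them by hand, and map them with dma_map_single(); it now takes whole order-0 pages from dev_alloc_pages() and maps them with dma_map_page(). The helper names (rx_buf_map_frag, rx_buf_map_page) and the bare struct device parameter are placeholders, not driver code:

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Old scheme: fragment allocator + manual alignment + dma_map_single(). */
static dma_addr_t rx_buf_map_frag(struct device *dev, unsigned int raw_size,
                                  size_t map_size)
{
        void *buf = netdev_alloc_frag(raw_size);
        dma_addr_t addr;

        if (unlikely(!buf))
                return DMA_MAPPING_ERROR;
        /* netdev_alloc_frag() gives no particular start-of-buffer alignment
         * guarantee, hence the manual realignment the commit removes. */
        buf = PTR_ALIGN(buf, SMP_CACHE_BYTES);
        addr = dma_map_single(dev, buf, map_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                skb_free_frag(buf);
                return DMA_MAPPING_ERROR;
        }
        return addr;
}

/* New scheme: one full page, naturally page-aligned, mapped as a page. */
static dma_addr_t rx_buf_map_page(struct device *dev)
{
        struct page *p = dev_alloc_pages(0);
        dma_addr_t addr;

        if (unlikely(!p))
                return DMA_MAPPING_ERROR;
        addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                __free_pages(p, 0);
                return DMA_MAPPING_ERROR;
        }
        return addr;
}

Page backing also makes the SMP_CACHE_BYTES reservation unnecessary, which is why the first hunk drops the "- SMP_CACHE_BYTES" term from dpaa_bp_size().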
@@ -180,13 +180,7 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
 #define DPAA_BP_RAW_SIZE 4096
 
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
@@ -1313,13 +1307,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
 		vaddr = phys_to_virt(qm_fd_addr(fd));
 		sgt = vaddr + qm_fd_get_offset(fd);
 
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
 		dpaa_release_sgt_members(sgt);
 
-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+				    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+				    DMA_FROM_DEVICE);
 		if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
 			netdev_err(net_dev, "DMA mapping failed\n");
 			return;
@@ -1469,21 +1464,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 	struct net_device *net_dev = dpaa_bp->priv->net_dev;
 	struct bm_buffer bmb[8];
 	dma_addr_t addr;
-	void *new_buf;
+	struct page *p;
 	u8 i;
 
 	for (i = 0; i < 8; i++) {
-		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-		if (unlikely(!new_buf)) {
-			netdev_err(net_dev,
-				   "netdev_alloc_frag() failed, size %zu\n",
-				   dpaa_bp->raw_size);
+		p = dev_alloc_pages(0);
+		if (unlikely(!p)) {
+			netdev_err(net_dev, "dev_alloc_pages() failed\n");
 			goto release_previous_buffs;
 		}
-		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+				    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
 					       addr))) {
 			netdev_err(net_dev, "DMA map failed\n");
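
(Annotation, not part of the commit.) dev_alloc_pages(0) requests a single order-0 page, i.e. one 4 KiB page under the default page size, matching DPAA_BP_RAW_SIZE. If memory serves, it is a thin wrapper in include/linux/skbuff.h that adds Rx-friendly GFP flags; approximately:

/* Approximate expansion of dev_alloc_pages(0), for reference only: */
struct page *p = __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, 0);
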
@@ -1694,7 +1686,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
 	return skb;
 
 free_buffer:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 	return NULL;
 }
@@ -1741,8 +1733,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			goto free_buffers;
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(priv->rx_dma_dev, sg_addr,
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1794,7 +1786,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
 	/* free the SG table buffer */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return skb;
@@ -1811,7 +1803,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
 		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {
 			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1822,7 +1814,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 		break;
 	}
 
 	/* free the SGT fragment */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return NULL;
 }
@@ -2281,8 +2273,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		return qman_cb_dqrr_consume;
 	}
 
-	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	/* prefetch the first 64 bytes of the frame or the SGT start */
 	vaddr = phys_to_virt(addr);
@@ -2637,7 +2629,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
 	dma_addr_t addr = bm_buf_addr(bmb);
 
-	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	skb_free_frag(phys_to_virt(addr));
 }
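
(Annotation, not part of the commit.) Note that dpaa_bp_free_pf() keeps skb_free_frag() even though the buffers are now whole pages, while the skb-construction paths above switch to free_pages(). The two are consistent here: skb_free_frag() is a one-line wrapper over page_frag_free(), which drops the page reference and frees the page once the refcount reaches zero, so for a page holding a single reference it ends up doing the same thing. From include/linux/skbuff.h:

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}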