Commit 84d06c60 authored by Madalin Bucur, committed by David S. Miller

dpaa_eth: use a page to store the SGT

Use a page to store the scatter gather table on the transmit path.
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2388ba36
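
In short: the Tx scatter/gather table (SGT) buffer moves from a netdev page fragment (netdev_alloc_frag()/skb_free_frag(), mapped with dma_map_single()) to a dedicated order-0 page (dev_alloc_pages()/free_pages(), mapped with dma_map_page()). A minimal sketch of the new allocate-and-map pattern follows; it is an illustration, not the driver code. The function name sgt_page_map() and the dev/headroom/sgt_size parameters are placeholders, and DMA_TO_DEVICE stands in for the driver's dma_dir.

/* Sketch only: the allocate-and-map pattern adopted in skb_to_sg_fd().
 * Error handling is reduced to the essentials; the driver returns
 * -ENOMEM / -EINVAL instead of a sentinel dma_addr_t.
 */
#include <linux/skbuff.h>	/* dev_alloc_pages() */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static dma_addr_t sgt_page_map(struct device *dev, size_t headroom,
			       size_t sgt_size, struct page **pg)
{
	struct page *p;
	dma_addr_t addr;

	/* one order-0 page replaces the old netdev_alloc_frag() buffer */
	p = dev_alloc_pages(0);
	if (!p)
		return DMA_MAPPING_ERROR;

	/* CPU-side writes (private data, SGT entries, the skb
	 * back-pointer) go through page_address(p), as the patch
	 * does with buff_start
	 */

	/* page-based mapping; pairs with dma_unmap_page() on cleanup */
	addr = dma_map_page(dev, p, 0, headroom + sgt_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		__free_pages(p, 0);
		return DMA_MAPPING_ERROR;
	}

	*pg = p;
	return addr;
}

Note that the spans stay consistent across the patch: the fd offset is set to priv->tx_headroom, so the qm_fd_get_offset(fd) + DPAA_SGT_SIZE span that dpaa_cleanup_tx_fd() unmaps is the same priv->tx_headroom + DPAA_SGT_SIZE span mapped here.
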
@@ -1592,7 +1592,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	int i;

 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-		dma_unmap_single(priv->tx_dma_dev, addr,
+		dma_unmap_page(priv->tx_dma_dev, addr,
 				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
@@ -1636,8 +1636,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	}

 	if (qm_fd_get_format(fd) == qm_fd_sg)
-		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(vaddr);
+		/* Free the page that we allocated on Tx for the SGT */
+		free_pages((unsigned long)vaddr, 0);

 	return skb;
 }
@@ -1885,21 +1885,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	struct net_device *net_dev = priv->net_dev;
 	struct qm_sg_entry *sgt;
 	struct sk_buff **skbh;
-	int i, j, err, sz;
-	void *buffer_start;
+	void *buff_start;
 	skb_frag_t *frag;
 	dma_addr_t addr;
 	size_t frag_len;
-	void *sgt_buf;
+	struct page *p;
+	int i, j, err;

-	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-	sgt_buf = netdev_alloc_frag(sz);
-	if (unlikely(!sgt_buf)) {
-		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-			   sz);
+	/* get a page to store the SGTable */
+	p = dev_alloc_pages(0);
+	if (unlikely(!p)) {
+		netdev_err(net_dev, "dev_alloc_pages() failed\n");
 		return -ENOMEM;
 	}
+	buff_start = page_address(p);

 	/* Enable L3/L4 hardware checksum computation.
 	 *
@@ -1907,7 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	 * need to write into the skb.
 	 */
 	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
 	if (unlikely(err < 0)) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1916,7 +1915,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	}

 	/* SGT[0] is used by the linear part */
-	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+	sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
 	frag_len = skb_headlen(skb);
 	qm_sg_entry_set_len(&sgt[0], frag_len);
 	sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1954,14 +1953,14 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	/* Set the final bit in the last used entry of the SGT */
 	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);

-	/* set fd offset to priv->tx_headroom */
 	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);

 	/* DMA map the SGT page */
-	buffer_start = (void *)sgt - priv->tx_headroom;
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
 	*skbh = skb;

-	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
-			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+	addr = dma_map_page(priv->tx_dma_dev, p, 0,
+			    priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		netdev_err(priv->net_dev, "DMA mapping failed\n");
@@ -1982,7 +1981,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-	skb_free_frag(sgt_buf);
+	free_pages((unsigned long)buff_start, 0);

 	return err;
 }
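
For symmetry, a sketch of the matching teardown now done in dpaa_cleanup_tx_fd(), with the same caveats as above: sgt_page_unmap() and its parameters are placeholders, and the driver itself recovers vaddr from the frame descriptor rather than receiving it as an argument.

/* Sketch only: the unmap-and-free pairing on the cleanup path,
 * continuing the sgt_page_map() example above.
 */
static void sgt_page_unmap(struct device *dev, dma_addr_t addr,
			   void *vaddr, size_t headroom, size_t sgt_size)
{
	/* undo dma_map_page() over the same headroom + SGT span */
	dma_unmap_page(dev, addr, headroom + sgt_size, DMA_TO_DEVICE);

	/* free by virtual address, as the patch does; order 0
	 * matches the dev_alloc_pages(0) on the Tx side
	 */
	free_pages((unsigned long)vaddr, 0);
}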