Commit 20f79739 authored by Wei Fang, committed by Paolo Abeni

net: fec: recycle pages for transmitted XDP frames

Once XDP frames have been successfully transmitted through the
ndo_xdp_xmit() interface, it is the driver's responsibility to free
the frames so that the page_pool can recycle the pages and reuse
them. However, the fec driver does not implement this, which leads
to a user-visible problem: the console keeps printing warnings like
the following.

[  157.568851] page_pool_release_retry() stalled pool shutdown 1389 inflight 60 sec
[  217.983446] page_pool_release_retry() stalled pool shutdown 1389 inflight 120 sec
[  278.399006] page_pool_release_retry() stalled pool shutdown 1389 inflight 181 sec
[  338.812885] page_pool_release_retry() stalled pool shutdown 1389 inflight 241 sec
[  399.226946] page_pool_release_retry() stalled pool shutdown 1389 inflight 302 sec

Therefore, to solve this issue, free the XDP frames via xdp_return_frame()
when cleaning the tx BD ring.

Fixes: 6d6b39f1 ("net: fec: add initial XDP support")
Signed-off-by: Wei Fang <wei.fang@nxp.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent be7ecbe7
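
For orientation before the diff: the approach is to remember, per TX
descriptor, whether the attached buffer is an skb or an XDP frame, and to
release it accordingly at TX-completion time. Below is a minimal sketch of
that completion-side handling. The enum and struct mirror the ones the patch
introduces, but the helper fec_tx_buf_complete() and its simplified unmap
logic (no TSO-header special case) are hypothetical and only illustrate the
pattern, not the driver's actual fec_enet_tx_queue() code.

/*
 * Simplified sketch of the TX-completion handling added by this patch.
 * fec_tx_buf_complete() is an illustrative helper, not driver code.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,	/* buffer was queued by the normal skb path */
	FEC_TXBUF_T_XDP_NDO,	/* buffer was queued via ndo_xdp_xmit() */
};

struct fec_tx_buffer {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdp;
	};
	enum fec_txbuf_type type;
};

/* Release one completed TX buffer according to how it was queued. */
static void fec_tx_buf_complete(struct device *dev, struct fec_tx_buffer *buf,
				dma_addr_t dma, size_t len)
{
	if (dma)
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

	if (buf->type == FEC_TXBUF_T_SKB) {
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
	} else {
		/* Returning the frame lets the page_pool recycle the page,
		 * which is what stops the "stalled pool shutdown" warnings.
		 */
		xdp_return_frame(buf->xdp);
		buf->xdp = NULL;
		buf->type = FEC_TXBUF_T_SKB;	/* restore the default type */
	}
}
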
@@ -544,10 +544,23 @@ enum {
 	XDP_STATS_TOTAL,
 };
 
+enum fec_txbuf_type {
+	FEC_TXBUF_T_SKB,
+	FEC_TXBUF_T_XDP_NDO,
+};
+
+struct fec_tx_buffer {
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdp;
+	};
+	enum fec_txbuf_type type;
+};
+
 struct fec_enet_priv_tx_q {
 	struct bufdesc_prop bd;
 	unsigned char *tx_bounce[TX_RING_SIZE];
-	struct sk_buff *tx_skbuff[TX_RING_SIZE];
+	struct fec_tx_buffer tx_buf[TX_RING_SIZE];
 
 	unsigned short tx_stop_threshold;
 	unsigned short tx_wake_threshold;
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
 			 fec16_to_cpu(bdp->cbd_sc),
 			 fec32_to_cpu(bdp->cbd_bufaddr),
 			 fec16_to_cpu(bdp->cbd_datlen),
-			 txq->tx_skbuff[index]);
+			 txq->tx_buf[index].skb);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
-	/* Make sure the update to bdp and tx_skbuff are performed before
-	 * txq->bd.cur.
-	 */
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
 	wmb();
 	txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (bdp->cbd_bufaddr &&
-			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			if (txq->tx_skbuff[i]) {
-				dev_kfree_skb_any(txq->tx_skbuff[i]);
-				txq->tx_skbuff[i] = NULL;
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				if (bdp->cbd_bufaddr &&
+				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+				if (txq->tx_buf[i].skb) {
+					dev_kfree_skb_any(txq->tx_buf[i].skb);
+					txq->tx_buf[i].skb = NULL;
+				}
+			} else {
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1360,6 +1375,7 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct fec_enet_private *fep;
+	struct xdp_frame *xdpf;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-		skb = txq->tx_skbuff[index];
-		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 fec16_to_cpu(bdp->cbd_datlen),
-					 DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb)
-			goto skb_done;
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			skb = txq->tx_buf[index].skb;
+			txq->tx_buf[index].skb = NULL;
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!skb)
+				goto tx_buf_done;
+		} else {
+			xdpf = txq->tx_buf[index].xdp;
+			if (bdp->cbd_bufaddr)
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!xdpf) {
+				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+				goto tx_buf_done;
+			}
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1446,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += skb->len;
-		}
 
-		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-		 * are to time stamp the packet, so we still need to check time
-		 * stamping enabled flag.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-			     fep->hwts_tx_en) &&
-		    fep->bufdesc_ex) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-			skb_tstamp_tx(skb, &shhwtstamps);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+				ndev->stats.tx_bytes += skb->len;
+			else
+				ndev->stats.tx_bytes += xdpf->len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1459,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit */
-		dev_kfree_skb_any(skb);
-skb_done:
-		/* Make sure the update to bdp and tx_skbuff are performed
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct skb_shared_hwtstamps shhwtstamps;
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			dev_kfree_skb_any(skb);
+		} else {
+			xdp_return_frame(xdpf);
+
+			txq->tx_buf[index].xdp = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		}
+
+tx_buf_done:
+		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
 		wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
-			skb = txq->tx_skbuff[i];
-			txq->tx_skbuff[i] = NULL;
-			dev_kfree_skb(skb);
+
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				skb = txq->tx_buf[i].skb;
+				txq->tx_buf[i].skb = NULL;
+				dev_kfree_skb(skb);
+			} else {
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			}
 		}
 	}
 }
@@ -3817,7 +3870,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_skbuff[index] = NULL;
+	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	txq->tx_buf[index].xdp = frame;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.