Commit 132c32ee authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: Add TX via XDP zero-copy socket

We add support for XDP ZC TX submission and cleaning to
stmmac_tx_clean(). The function is made to clean as many completed TX
frames as possible, i.e. limited by priv->dma_tx_size instead of the
NAPI budget. For a TX ring that is associated with an XSK pool, the
function stmmac_xdp_xmit_zc() is introduced to transmit frame buffers
from the XSK pool using xsk_tx_peek_desc(). To make stmmac_tx_clean()
support the cleaning of XSK TX frames, a new TX buffer type,
STMMAC_TXBUF_T_XSK_TX, is introduced.

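As an illustration, here is a minimal sketch of the shape such a ZC
transmit loop can take. The xsk_tx_peek_desc()/xsk_tx_release() pairing
and the xsk_buff_raw_* helpers are the standard XSK pool API; the
function name, stmmac_fill_tx_desc_hyp(), and the exact field usage are
assumptions for illustration, not the patch's verbatim code:

static bool stmmac_xdp_xmit_zc_sketch(struct stmmac_priv *priv, u32 queue,
				      u32 budget)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	struct xdp_desc xdp_desc;
	bool work_done = true;

	while (budget-- > 0) {
		dma_addr_t dma_addr;

		if (unlikely(stmmac_tx_avail(priv, queue) < 1)) {
			/* TX ring is full: report unfinished work so
			 * that the NAPI poll is rescheduled.
			 */
			work_done = false;
			break;
		}

		/* Pull the next user-space frame from the XSK TX ring */
		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Pool memory is pre-mapped; resolve the DMA address
		 * and sync the buffer for the device.
		 */
		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		/* Mark the slot so the cleaning path can recycle it */
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_XSK_TX;

		/* Hypothetical stand-in for the real descriptor setup */
		stmmac_fill_tx_desc_hyp(priv, tx_q, dma_addr, xdp_desc.len);

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	}

	/* Publish consumed TX descriptors back to user space */
	xsk_tx_release(pool);

	return work_done;
}
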
As stmmac_tx_clean() uses its return value to cue whether the NAPI
function should continue to poll, we change its callers to pass the
NAPI budget instead of priv->dma_tx_size through the 'budget' input,
and make stmmac_tx_clean() always clean up to the TX ring size instead.
This allows us to use the boolean return status of stmmac_xdp_xmit_zc()
to decide whether the XSK TX work is done: if true, set 'xmits' to
'budget - 1' so that the NAPI poll may exit; else, set 'xmits' to
'budget' so that the NAPI poll continues, since the XSK TX work is not
done. Finally, at the end of stmmac_tx_clean(), the function now
returns the maximum of 'count' and 'xmits', so that the status of both
TX cleaning and XSK TX (XDP ZC only) is considered.

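The tail of the cleaning path can then be pictured roughly as follows.
This is a simplified sketch of the control flow described above, not
the patch's exact code; 'count', 'tx_q', 'priv', 'queue' and 'budget'
are assumed to come from the enclosing function, and xsk_tx_completed()
is the pool API for batch-reporting completions:

	int xmits = 0;

	if (tx_q->xsk_pool) {
		bool work_done;

		/* Batch-report cleaned XSK TX frames back to the pool */
		if (tx_q->xsk_frames_done) {
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
			tx_q->xsk_frames_done = 0;
		}

		/* Submit new XSK frames; true means the XSK TX work is
		 * done, so NAPI may exit (budget - 1); otherwise keep
		 * polling by returning the full budget.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue, budget);
		xmits = work_done ? budget - 1 : budget;
	}

	/* Consider both the cleaning progress and the XSK TX status */
	return max_t(int, count, xmits);
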
This patch adds a new NAPI poll function, stmmac_napi_poll_rxtx(), that
is enabled/disabled for RX and TX rings that are bound to an XSK pool.
This NAPI poll function starts by cleaning the TX ring and submitting
XSK TX frames to it, before proceeding to RX operations, i.e. receiving
RX frames and replenishing the RX ring with free buffers obtained from
the XSK pool. Therefore, during XSK RX and TX setup, the driver enables
stmmac_napi_poll_rxtx() for RX and TX operations; during XSK RX and TX
pool tear-down, the driver re-enables the existing independent NAPI
poll functions accordingly: stmmac_napi_poll_rx() and
stmmac_napi_poll_tx().
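
A rough sketch of that combined poll function, assuming the
stmmac_rx_zc() receive path added by the preceding RX ZC patch and the
driver's stmmac_enable_dma_irq() hook; the channel-lock handling around
the IRQ re-enable is omitted, so treat this as the flow, not verbatim
code:

static int stmmac_napi_poll_rxtx_sketch(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int rx_done, tx_done;

	/* Clean TX completions and submit XSK TX frames first ... */
	tx_done = stmmac_tx_clean(priv, budget, chan);
	tx_done = min(tx_done, budget);

	/* ... then receive frames and replenish the RX ring with free
	 * buffers obtained from the XSK pool.
	 */
	rx_done = stmmac_rx_zc(priv, budget, chan);

	/* If either direction still has work, stay in polling mode */
	if (tx_done >= budget || rx_done >= budget)
		return budget;

	/* All work done: exit polling and re-enable both RX and TX
	 * IRQs (done under ch->lock in the driver).
	 */
	if (napi_complete_done(napi, rx_done))
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);

	return min(rx_done, budget - 1);
}
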
Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bba2556e
@@ -40,6 +40,7 @@ enum stmmac_txbuf_type {
 	STMMAC_TXBUF_T_SKB,
 	STMMAC_TXBUF_T_XDP_TX,
 	STMMAC_TXBUF_T_XDP_NDO,
+	STMMAC_TXBUF_T_XSK_TX,
 };
 
 struct stmmac_tx_info {
@@ -69,6 +70,8 @@ struct stmmac_tx_queue {
 		struct xdp_frame **xdpf;
 	};
 	struct stmmac_tx_info *tx_skbuff_dma;
+	struct xsk_buff_pool *xsk_pool;
+	u32 xsk_frames_done;
 	unsigned int cur_tx;
 	unsigned int dirty_tx;
 	dma_addr_t dma_tx_phy;
@@ -116,6 +119,7 @@ struct stmmac_rx_queue {
 struct stmmac_channel {
 	struct napi_struct rx_napi ____cacheline_aligned_in_smp;
 	struct napi_struct tx_napi ____cacheline_aligned_in_smp;
+	struct napi_struct rxtx_napi ____cacheline_aligned_in_smp;
 	struct stmmac_priv *priv_data;
 	spinlock_t lock;
 	u32 index;
@@ -338,6 +342,8 @@ static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
 
 #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
@@ -14,7 +14,8 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
 	u32 frame_size;
 	int err;
 
-	if (queue >= priv->plat->rx_queues_to_use)
+	if (queue >= priv->plat->rx_queues_to_use ||
+	    queue >= priv->plat->tx_queues_to_use)
 		return -EINVAL;
 
 	frame_size = xsk_pool_get_rx_frame_size(pool);
@@ -34,14 +35,17 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
 	if (need_update) {
 		stmmac_disable_rx_queue(priv, queue);
+		stmmac_disable_tx_queue(priv, queue);
 		napi_disable(&ch->rx_napi);
+		napi_disable(&ch->tx_napi);
 	}
 
 	set_bit(queue, priv->af_xdp_zc_qps);
 
 	if (need_update) {
-		napi_enable(&ch->rx_napi);
+		napi_enable(&ch->rxtx_napi);
 		stmmac_enable_rx_queue(priv, queue);
+		stmmac_enable_tx_queue(priv, queue);
 
 		err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
 		if (err)
@@ -57,7 +61,8 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
 	struct xsk_buff_pool *pool;
 	bool need_update;
 
-	if (queue >= priv->plat->rx_queues_to_use)
+	if (queue >= priv->plat->rx_queues_to_use ||
+	    queue >= priv->plat->tx_queues_to_use)
 		return -EINVAL;
 
 	pool = xsk_get_pool_from_qid(priv->dev, queue);
@@ -68,8 +73,9 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
 	if (need_update) {
 		stmmac_disable_rx_queue(priv, queue);
+		stmmac_disable_tx_queue(priv, queue);
 		synchronize_rcu();
-		napi_disable(&ch->rx_napi);
+		napi_disable(&ch->rxtx_napi);
 	}
 
 	xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
@@ -78,7 +84,9 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
 	if (need_update) {
 		napi_enable(&ch->rx_napi);
+		napi_enable(&ch->tx_napi);
 		stmmac_enable_rx_queue(priv, queue);
+		stmmac_enable_tx_queue(priv, queue);
 	}
 
 	return 0;