Commit 8d212a9e authored by Niklas Cassel, committed by David S. Miller

net: stmmac: set MSS for each tx DMA channel

The DMA engine in dwmac4 can segment a large TSO packet into several
smaller packets, each at most Maximum Segment Size (MSS) bytes long.

The DMA engine fetches and saves the MSS via a context descriptor.

This context descriptor has to be provided to each tx DMA channel.
To ensure that this is done, move struct member mss from stmmac_priv
to stmmac_tx_queue.
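
(Not part of the patch: a minimal, user-space sketch of the per-queue MSS
caching described above. struct tx_queue, emit_context_desc() and
maybe_set_mss() are illustrative stand-ins for struct stmmac_tx_queue, the
set_mss() descriptor callback and the check in stmmac_tso_xmit().)

#include <stdint.h>
#include <stdio.h>

struct tx_queue {
	uint32_t mss;	/* last MSS programmed on this DMA channel, 0 = none */
};

/* Stand-in for priv->hw->desc->set_mss(): writes a context descriptor
 * that the DMA engine fetches before segmenting the packet. */
static void emit_context_desc(struct tx_queue *q, uint32_t mss)
{
	printf("queue %p: context descriptor, MSS=%u\n", (void *)q, (unsigned)mss);
}

/* Called once per TSO packet: a context descriptor is only needed when
 * the MSS differs from what this queue's channel was last given. */
static void maybe_set_mss(struct tx_queue *q, uint32_t mss)
{
	if (mss != q->mss) {
		emit_context_desc(q, mss);
		q->mss = mss;
	}
}

int main(void)
{
	struct tx_queue q[2] = { { .mss = 0 }, { .mss = 0 } };

	maybe_set_mss(&q[0], 1448);	/* queue 0 gets a context descriptor */
	maybe_set_mss(&q[1], 1448);	/* queue 1 still needs its own */
	maybe_set_mss(&q[0], 1448);	/* unchanged: no descriptor emitted */
	return 0;
}

With a single cached value in stmmac_priv, the second call above would be
skipped and queue 1's DMA channel would never receive its context
descriptor, which is the situation this patch fixes.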

stmmac_reset_queues_param() now also resets mss, together with the other
queue parameters, so the mss reset can be removed from
stmmac_resume().

init_dma_tx_desc_rings() now also resets mss, together with the other
queue parameters, so the mss reset can be removed from
stmmac_open().
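
(Also purely illustrative, not from the patch: the matching per-queue reset,
standing in for the loops in init_dma_tx_desc_rings() and
stmmac_reset_queues_param(); reset_tx_queue() is a made-up helper.)

#include <stdint.h>

struct tx_queue {
	unsigned int cur_tx;
	unsigned int dirty_tx;
	uint32_t mss;
};

/* Clearing mss together with the ring indices forces a fresh context
 * descriptor on the first TSO transmit after open/resume. */
void reset_tx_queue(struct tx_queue *q)
{
	q->cur_tx = 0;
	q->dirty_tx = 0;
	q->mss = 0;
}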

This fixes tx queue timeouts for dwmac4 when the DT property
snps,tx-queues-to-use is > 1 and iperf3 is run with multiple threads.

Fixes: ce736788 ("net: stmmac: adding multiple buffers for TX")
Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3fef2b62
@@ -58,6 +58,7 @@ struct stmmac_tx_queue {
 	unsigned int dirty_tx;
 	dma_addr_t dma_tx_phy;
 	u32 tx_tail_addr;
+	u32 mss;
 };
 struct stmmac_rx_queue {
@@ -138,7 +139,6 @@ struct stmmac_priv {
 	spinlock_t ptp_lock;
 	void __iomem *mmcaddr;
 	void __iomem *ptpaddr;
-	u32 mss;
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dbgfs_dir;
@@ -1355,6 +1355,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 		tx_q->dirty_tx = 0;
 		tx_q->cur_tx = 0;
+		tx_q->mss = 0;
 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
@@ -1946,6 +1947,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 					     (i == DMA_TX_SIZE - 1));
 	tx_q->dirty_tx = 0;
 	tx_q->cur_tx = 0;
+	tx_q->mss = 0;
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
 	stmmac_start_tx_dma(priv, chan);
@@ -2632,7 +2634,6 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-	priv->mss = 0;
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
@@ -2872,10 +2873,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	mss = skb_shinfo(skb)->gso_size;
 	/* set new MSS value if needed */
-	if (mss != priv->mss) {
+	if (mss != tx_q->mss) {
 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
 		priv->hw->desc->set_mss(mss_desc, mss);
-		priv->mss = mss;
+		tx_q->mss = mss;
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 	}
@@ -4436,6 +4437,7 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
 		tx_q->cur_tx = 0;
 		tx_q->dirty_tx = 0;
+		tx_q->mss = 0;
 	}
 }
@@ -4481,11 +4483,6 @@ int stmmac_resume(struct device *dev)
 	stmmac_reset_queues_param(priv);
-	/* reset private mss value to force mss context settings at
-	 * next tso xmit (only used for gmac4).
-	 */
-	priv->mss = 0;
 	stmmac_clear_descriptors(priv);
 	stmmac_hw_setup(ndev, false);