Commit 8e4372e6 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'add-mtu-change-with-stmmac-interface-running'

Christian Marangi says:

====================
Add MTU change with stmmac interface running

This series is to permit MTU change while the interface is running.
Major rework is needed to permit allocating a new dma conf based on
the new MTU before applying it. This is to make sure there is enough
space to allocate all the DMA queue before releasing the stmmac driver.

This was tested with a simple way to stress the network while the
interface is running.

2 ssh connection to the device:
- One generating simple traffic with while true; do free; done
- The other making the mtu change with a delay of 1 second

The connection is correctly stopped and recovered after the MTU is changed.

The first 2 patches of this series are minor fixups that fix problems
encountered while testing this. One fixes a problem where we re-enable a queue
while we are generating a new dma conf. The other fixes a corner case that
was noticed while stressing the driver and turning down the interface while
there was some traffic.

(this is a follow-up of a simpler patch that wanted to add the same
feature. It was suggested to first try to check if it was possible to
apply the new configuration. Posting as RFC as it does major rework for
the new concept of DMA conf)
====================

Link: https://lore.kernel.org/r/20220723142933.16030-1-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents aa246499 34700796
...@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) ...@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) { while (len != 0) {
tx_q->tx_skbuff[entry] = NULL; tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
desc = tx_q->dma_tx + entry; desc = tx_q->dma_tx + entry;
if (len > bmax) { if (len > bmax) {
...@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p) ...@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/ */
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) % (((rx_q->dirty_rx) + 1) %
priv->dma_rx_size) * priv->dma_conf.dma_rx_size) *
sizeof(struct dma_desc))); sizeof(struct dma_desc)));
} }
...@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) ...@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
*/ */
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
((tx_q->dirty_tx + 1) % ((tx_q->dirty_tx + 1) %
priv->dma_tx_size)) priv->dma_conf.dma_tx_size))
* sizeof(struct dma_desc))); * sizeof(struct dma_desc)));
} }
......
...@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) ...@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len); STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL; tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
if (priv->extend_desc) if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry); desc = (struct dma_desc *)(tx_q->dma_etx + entry);
...@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p) ...@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
struct stmmac_priv *priv = rx_q->priv_data; struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */ /* Fill DES3 in case of RING mode */
if (priv->dma_buf_sz == BUF_SIZE_16KiB) if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
} }
......
...@@ -188,6 +188,18 @@ struct stmmac_rfs_entry { ...@@ -188,6 +188,18 @@ struct stmmac_rfs_entry {
int tc; int tc;
}; };
struct stmmac_dma_conf {
unsigned int dma_buf_sz;
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
unsigned int dma_rx_size;
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
unsigned int dma_tx_size;
};
struct stmmac_priv { struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */ /* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
...@@ -201,7 +213,6 @@ struct stmmac_priv { ...@@ -201,7 +213,6 @@ struct stmmac_priv {
int sph_cap; int sph_cap;
u32 sarc_type; u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak; unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES]; u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en; int hwts_rx_en;
...@@ -213,13 +224,7 @@ struct stmmac_priv { ...@@ -213,13 +224,7 @@ struct stmmac_priv {
int (*hwif_quirks)(struct stmmac_priv *priv); int (*hwif_quirks)(struct stmmac_priv *priv);
struct mutex lock; struct mutex lock;
/* RX Queue */ struct stmmac_dma_conf dma_conf;
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
unsigned int dma_rx_size;
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
unsigned int dma_tx_size;
/* Generic channel for NAPI */ /* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX]; struct stmmac_channel channel[STMMAC_CH_MAX];
......
...@@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev, ...@@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = DMA_MAX_RX_SIZE; ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE; ring->tx_max_pending = DMA_MAX_TX_SIZE;
ring->rx_pending = priv->dma_rx_size; ring->rx_pending = priv->dma_conf.dma_rx_size;
ring->tx_pending = priv->dma_tx_size; ring->tx_pending = priv->dma_conf.dma_tx_size;
} }
static int stmmac_set_ringparam(struct net_device *netdev, static int stmmac_set_ringparam(struct net_device *netdev,
......
...@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) ...@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
struct stmmac_channel *ch = &priv->channel[i]; struct stmmac_channel *ch = &priv->channel[i];
u32 tail; u32 tail;
tail = priv->rx_queue[i].dma_rx_phy + tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
(priv->dma_rx_size * sizeof(struct dma_desc)); (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i); stmmac_start_rx(priv, priv->ioaddr, i);
...@@ -1680,7 +1680,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) ...@@ -1680,7 +1680,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{ {
struct stmmac_packet_attrs attr = { }; struct stmmac_packet_attrs attr = { };
int size = priv->dma_buf_sz; int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr; attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN; attr.max_size = size - ETH_FCS_LEN;
...@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv) ...@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
/* Find first TBS enabled Queue, if any */ /* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) for (i = 0; i < priv->plat->tx_queues_to_use; i++)
if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break; break;
if (i >= priv->plat->tx_queues_to_use) if (i >= priv->plat->tx_queues_to_use)
......
...@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv, ...@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use) if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL; return -EINVAL;
if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL; return -EINVAL;
if (qopt->enable) if (qopt->enable)
priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else else
priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n", netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue); qopt->enable ? "enabled" : "disabled", qopt->queue);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment