Commit 8e4372e6 authored by Jakub Kicinski

Merge branch 'add-mtu-change-with-stmmac-interface-running'

Christian Marangi says:

====================
Add MTU change with stmmac interface running

This series permits changing the MTU while the interface is running.
A major rework was needed to allow allocating a new dma conf based on
the new MTU before applying it. This makes sure there is enough space
to allocate all the DMA queues before releasing the stmmac driver.

This was tested by stressing the network in a simple way while the
interface is running.

2 ssh connections to the device:
- One generating simple traffic with: while true; do free; done
- The other changing the MTU with a delay of 1 second

The connection is correctly stopped and recovered after the MTU is changed.

The first 2 patches of this series are minor fixups for problems
found while testing this. One fixes a problem where we re-enable a queue
while we are generating a new dma conf. The other is a corner case that
was noticed while stressing the driver and turning down the interface while
there was some traffic.

(This is a follow-up to a simpler patch that tried to add the same
feature. It was suggested to first check whether it is possible to
apply the new configuration. Posted as RFC as it does a major rework for
the new concept of a DMA conf.)
====================

Link: https://lore.kernel.org/r/20220723142933.16030-1-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents aa246499 34700796
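As a reader's aid, here is a rough C sketch (not part of this diff) of the MTU-change
path the series builds toward: allocate a complete new dma conf first, and tear the
running configuration down only once that allocation has succeeded. The helper names
stmmac_setup_dma_desc() and __stmmac_open() are assumptions based on this series;
treat the signatures as approximate.

static int stmmac_change_mtu_sketch(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;

	if (netif_running(dev)) {
		/* Allocate rings and buffers for the new MTU up front;
		 * on failure the interface keeps running on the old conf.
		 */
		dma_conf = stmmac_setup_dma_desc(priv, new_mtu);	/* assumed helper */
		if (IS_ERR(dma_conf))
			return PTR_ERR(dma_conf);

		stmmac_release(dev);		/* stop using the old conf */
		__stmmac_open(dev, dma_conf);	/* restart on the new conf (assumed helper) */
		kfree(dma_conf);
	}

	dev->mtu = new_mtu;
	return 0;
}

Since the new conf is only swapped in after a successful allocation, a failed MTU
change leaves the running interface untouched.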
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	while (len != 0) {
 		tx_q->tx_skbuff[entry] = NULL;
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
 		desc = tx_q->dma_tx + entry;
 
 		if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 		 */
 		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
 				      (((rx_q->dirty_rx) + 1) %
-				       priv->dma_rx_size) *
+				       priv->dma_conf.dma_rx_size) *
 				      sizeof(struct dma_desc)));
 }
@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
 		 */
 		p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
 				      ((tx_q->dirty_tx + 1) %
-				       priv->dma_tx_size))
+				       priv->dma_conf.dma_tx_size))
 				      * sizeof(struct dma_desc)));
 }
...
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
 				       STMMAC_RING_MODE, 0, false, skb->len);
 		tx_q->tx_skbuff[entry] = NULL;
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
 
 		if (priv->extend_desc)
 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 	struct stmmac_priv *priv = rx_q->priv_data;
 
 	/* Fill DES3 in case of RING mode */
-	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+	if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
 		p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
 }
...
@@ -188,6 +188,18 @@ struct stmmac_rfs_entry {
 	int tc;
 };
 
+struct stmmac_dma_conf {
+	unsigned int dma_buf_sz;
+
+	/* RX Queue */
+	struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+	unsigned int dma_rx_size;
+
+	/* TX Queue */
+	struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+	unsigned int dma_tx_size;
+};
+
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
 	u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -201,7 +213,6 @@ struct stmmac_priv {
 	int sph_cap;
 	u32 sarc_type;
 
-	unsigned int dma_buf_sz;
 	unsigned int rx_copybreak;
 	u32 rx_riwt[MTL_MAX_TX_QUEUES];
 	int hwts_rx_en;
@@ -213,13 +224,7 @@ struct stmmac_priv {
 	int (*hwif_quirks)(struct stmmac_priv *priv);
 	struct mutex lock;
 
-	/* RX Queue */
-	struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
-	unsigned int dma_rx_size;
-
-	/* TX Queue */
-	struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
-	unsigned int dma_tx_size;
+	struct stmmac_dma_conf dma_conf;
 
 	/* Generic channel for NAPI */
 	struct stmmac_channel channel[STMMAC_CH_MAX];
...
@@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev,
 	ring->rx_max_pending = DMA_MAX_RX_SIZE;
 	ring->tx_max_pending = DMA_MAX_TX_SIZE;
-	ring->rx_pending = priv->dma_rx_size;
-	ring->tx_pending = priv->dma_tx_size;
+	ring->rx_pending = priv->dma_conf.dma_rx_size;
+	ring->tx_pending = priv->dma_conf.dma_tx_size;
 }
 
 static int stmmac_set_ringparam(struct net_device *netdev,
...
@@ -74,8 +74,8 @@ static int phyaddr = -1;
 module_param(phyaddr, int, 0444);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
-#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
 
 /* Limit to make sure XDP TX and slow path can coexist */
 #define STMMAC_XSK_TX_BUDGET_MAX	256
@@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
@@ -231,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 
 	/* synchronize_rcu() needed for pending XDP buffers to drain */
 	for (queue = 0; queue < rx_queues_cnt; queue++) {
-		rx_q = &priv->rx_queue[queue];
+		rx_q = &priv->dma_conf.rx_queue[queue];
 		if (rx_q->xsk_pool) {
 			synchronize_rcu();
 			break;
@@ -357,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len)
 
 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 	u32 avail;
 
 	if (tx_q->dirty_tx > tx_q->cur_tx)
 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 	else
-		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
 
 	return avail;
 }
@@ -375,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
  */
 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
 	u32 dirty;
 
 	if (rx_q->dirty_rx <= rx_q->cur_rx)
 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
 }
@@ -409,7 +412,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
 
 	/* check if all TX queues have the work finished */
 	for (queue = 0; queue < tx_cnt; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 
 		if (tx_q->dirty_tx != tx_q->cur_tx)
 			return -EBUSY; /* still unfinished work */
@@ -1220,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 	return 0;
 }
 
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf)
 {
 	u32 rx_cnt = priv->plat->rx_queues_to_use;
 	unsigned int desc_size;
@@ -1229,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 
 	/* Display RX rings */
 	for (queue = 0; queue < rx_cnt; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 
 		pr_info("\tRX Queue %u rings\n", queue);
@@ -1242,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 		}
 
 		/* Display RX ring */
-		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
 				    rx_q->dma_rx_phy, desc_size);
 	}
 }
 
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf)
 {
 	u32 tx_cnt = priv->plat->tx_queues_to_use;
 	unsigned int desc_size;
@@ -1256,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 
 	/* Display TX rings */
 	for (queue = 0; queue < tx_cnt; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 
 		pr_info("\tTX Queue %d rings\n", queue);
@@ -1271,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 			desc_size = sizeof(struct dma_desc);
 		}
 
-		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
 				    tx_q->dma_tx_phy, desc_size);
 	}
 }
 
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+				 struct stmmac_dma_conf *dma_conf)
 {
 	/* Display RX ring */
-	stmmac_display_rx_rings(priv);
+	stmmac_display_rx_rings(priv, dma_conf);
 
 	/* Display TX ring */
-	stmmac_display_tx_rings(priv);
+	stmmac_display_tx_rings(priv, dma_conf);
 }
 
 static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1306,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 /**
  * stmmac_clear_rx_descriptors - clear RX descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * Description: this function is called to clear the RX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+					struct stmmac_dma_conf *dma_conf,
+					u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int i;
 
 	/* Clear the RX descriptors */
-	for (i = 0; i < priv->dma_rx_size; i++)
+	for (i = 0; i < dma_conf->dma_rx_size; i++)
 		if (priv->extend_desc)
 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
 					    priv->use_riwt, priv->mode,
-					    (i == priv->dma_rx_size - 1),
-					    priv->dma_buf_sz);
+					    (i == dma_conf->dma_rx_size - 1),
+					    dma_conf->dma_buf_sz);
 		else
 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
 					    priv->use_riwt, priv->mode,
-					    (i == priv->dma_rx_size - 1),
-					    priv->dma_buf_sz);
+					    (i == dma_conf->dma_rx_size - 1),
+					    dma_conf->dma_buf_sz);
 }
 
 /**
  * stmmac_clear_tx_descriptors - clear tx descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index.
  * Description: this function is called to clear the TX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+					struct stmmac_dma_conf *dma_conf,
+					u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 	int i;
 
 	/* Clear the TX descriptors */
-	for (i = 0; i < priv->dma_tx_size; i++) {
-		int last = (i == (priv->dma_tx_size - 1));
+	for (i = 0; i < dma_conf->dma_tx_size; i++) {
+		int last = (i == (dma_conf->dma_tx_size - 1));
 		struct dma_desc *p;
 
 		if (priv->extend_desc)
@@ -1360,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
 /**
  * stmmac_clear_descriptors - clear descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * Description: this function is called to clear the TX and RX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+				     struct stmmac_dma_conf *dma_conf)
 {
 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1371,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 
 	/* Clear the RX descriptors */
 	for (queue = 0; queue < rx_queue_cnt; queue++)
-		stmmac_clear_rx_descriptors(priv, queue);
+		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
 
 	/* Clear the TX descriptors */
 	for (queue = 0; queue < tx_queue_cnt; queue++)
-		stmmac_clear_tx_descriptors(priv, queue);
+		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
 }
 
 /**
  * stmmac_init_rx_buffers - init the RX descriptor buffer.
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @p: descriptor pointer
  * @i: descriptor index
  * @flags: gfp flag
@@ -1388,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
  * Description: this function is called to allocate a receive buffer, perform
  * the DMA mapping and init the descriptor.
  */
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+				  struct stmmac_dma_conf *dma_conf,
+				  struct dma_desc *p,
 				  int i, gfp_t flags, u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
@@ -1420,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
 
 	stmmac_set_desc_addr(priv, p, buf->addr);
-	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
 		stmmac_init_desc3(priv, p);
 
 	return 0;
@@ -1429,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 /**
  * stmmac_free_rx_buffer - free RX dma buffers
  * @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
  * @i: buffer index.
  */
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+				  struct stmmac_rx_queue *rx_q,
+				  int i)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
 	if (buf->page)
@@ -1449,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 /**
  * stmmac_free_tx_buffer - free RX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * @i: buffer index.
  */
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+				  struct stmmac_dma_conf *dma_conf,
+				  u32 queue, int i)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 
 	if (tx_q->tx_skbuff_dma[i].buf &&
 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1493,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 /**
  * dma_free_rx_skbufs - free RX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  */
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+			       struct stmmac_dma_conf *dma_conf,
+			       u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++)
-		stmmac_free_rx_buffer(priv, queue, i);
+	for (i = 0; i < dma_conf->dma_rx_size; i++)
+		stmmac_free_rx_buffer(priv, rx_q, i);
 }
 
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
-				   gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+				   struct stmmac_dma_conf *dma_conf,
+				   u32 queue, gfp_t flags)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
+	for (i = 0; i < dma_conf->dma_rx_size; i++) {
 		struct dma_desc *p;
 		int ret;
@@ -1518,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
 		else
 			p = rx_q->dma_rx + i;
 
-		ret = stmmac_init_rx_buffers(priv, p, i, flags,
+		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
 					     queue);
 		if (ret)
 			return ret;
@@ -1532,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
 /**
  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  */
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+				struct stmmac_dma_conf *dma_conf,
+				u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
+	for (i = 0; i < dma_conf->dma_rx_size; i++) {
 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
 		if (!buf->xdp)
@@ -1550,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
 	}
 }
 
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+				      struct stmmac_dma_conf *dma_conf,
+				      u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
+	for (i = 0; i < dma_conf->dma_rx_size; i++) {
 		struct stmmac_rx_buffer *buf;
 		dma_addr_t dma_addr;
 		struct dma_desc *p;
@@ -1590,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
 /**
  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf,
+				    u32 queue, gfp_t flags)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	int ret;
 
 	netif_dbg(priv, probe, priv->dev,
 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
 		  (u32)rx_q->dma_rx_phy);
 
-	stmmac_clear_rx_descriptors(priv, queue);
+	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
 
 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1632,32 +1666,31 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
 		/* RX XDP ZC buffer pool may not be populated, e.g.
 		 * xdpsock TX-only.
 		 */
-		stmmac_alloc_rx_buffers_zc(priv, queue);
+		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
 	} else {
-		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
 		if (ret < 0)
 			return -ENOMEM;
 	}
 
-	rx_q->cur_rx = 0;
-	rx_q->dirty_rx = 0;
-
 	/* Setup the chained descriptor addresses */
 	if (priv->mode == STMMAC_CHAIN_MODE) {
 		if (priv->extend_desc)
 			stmmac_mode_init(priv, rx_q->dma_erx,
 					 rx_q->dma_rx_phy,
-					 priv->dma_rx_size, 1);
+					 dma_conf->dma_rx_size, 1);
 		else
 			stmmac_mode_init(priv, rx_q->dma_rx,
 					 rx_q->dma_rx_phy,
-					 priv->dma_rx_size, 0);
+					 dma_conf->dma_rx_size, 0);
 	}
 
 	return 0;
 }
 
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+				  struct stmmac_dma_conf *dma_conf,
+				  gfp_t flags)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1669,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
 
 	for (queue = 0; queue < rx_count; queue++) {
-		ret = __init_dma_rx_desc_rings(priv, queue, flags);
+		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
 		if (ret)
 			goto err_init_rx_buffers;
 	}
@@ -1678,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 
 err_init_rx_buffers:
 	while (queue >= 0) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 
 		if (rx_q->xsk_pool)
-			dma_free_rx_xskbufs(priv, queue);
+			dma_free_rx_xskbufs(priv, dma_conf, queue);
 		else
-			dma_free_rx_skbufs(priv, queue);
+			dma_free_rx_skbufs(priv, dma_conf, queue);
 
 		rx_q->buf_alloc_num = 0;
 		rx_q->xsk_pool = NULL;
@@ -1697,14 +1730,17 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 /**
  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
  * @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
  * Description: this function initializes the DMA TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf,
+				    u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 	int i;
 
 	netif_dbg(priv, probe, priv->dev,
@@ -1716,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
 		if (priv->extend_desc)
 			stmmac_mode_init(priv, tx_q->dma_etx,
 					 tx_q->dma_tx_phy,
-					 priv->dma_tx_size, 1);
+					 dma_conf->dma_tx_size, 1);
 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
 			stmmac_mode_init(priv, tx_q->dma_tx,
 					 tx_q->dma_tx_phy,
-					 priv->dma_tx_size, 0);
+					 dma_conf->dma_tx_size, 0);
 	}
 
 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
 
-	for (i = 0; i < priv->dma_tx_size; i++) {
+	for (i = 0; i < dma_conf->dma_tx_size; i++) {
 		struct dma_desc *p;
 
 		if (priv->extend_desc)
@@ -1744,16 +1780,11 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
 		tx_q->tx_skbuff[i] = NULL;
 	}
 
-	tx_q->dirty_tx = 0;
-	tx_q->cur_tx = 0;
-	tx_q->mss = 0;
-
-	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-
 	return 0;
 }
 
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+				  struct stmmac_dma_conf *dma_conf)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 tx_queue_cnt;
@@ -1762,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 	tx_queue_cnt = priv->plat->tx_queues_to_use;
 
 	for (queue = 0; queue < tx_queue_cnt; queue++)
-		__init_dma_tx_desc_rings(priv, queue);
+		__init_dma_tx_desc_rings(priv, dma_conf, queue);
 
 	return 0;
 }
@@ -1770,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
+ * @dma_conf: structure to take the dma data
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX/TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
 */
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+			       struct stmmac_dma_conf *dma_conf,
+			       gfp_t flags)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	ret = init_dma_rx_desc_rings(dev, flags);
+	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
 	if (ret)
 		return ret;
 
-	ret = init_dma_tx_desc_rings(dev);
+	ret = init_dma_tx_desc_rings(dev, dma_conf);
 
-	stmmac_clear_descriptors(priv);
+	stmmac_clear_descriptors(priv, dma_conf);
 
 	if (netif_msg_hw(priv))
-		stmmac_display_rings(priv);
+		stmmac_display_rings(priv, dma_conf);
 
 	return ret;
 }
@@ -1797,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 /**
  * dma_free_tx_skbufs - free TX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
  */
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+			       struct stmmac_dma_conf *dma_conf,
+			       u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 	int i;
 
 	tx_q->xsk_frames_done = 0;
 
-	for (i = 0; i < priv->dma_tx_size; i++)
-		stmmac_free_tx_buffer(priv, queue, i);
+	for (i = 0; i < dma_conf->dma_tx_size; i++)
+		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
 
 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1826,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
 	u32 queue;
 
 	for (queue = 0; queue < tx_queue_cnt; queue++)
-		dma_free_tx_skbufs(priv, queue);
+		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
 }
 
 /**
  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
 */
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+					 struct stmmac_dma_conf *dma_conf,
+					 u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 
 	/* Release the DMA RX socket buffers */
 	if (rx_q->xsk_pool)
-		dma_free_rx_xskbufs(priv, queue);
+		dma_free_rx_xskbufs(priv, dma_conf, queue);
 	else
-		dma_free_rx_skbufs(priv, queue);
+		dma_free_rx_skbufs(priv, dma_conf, queue);
 
 	rx_q->buf_alloc_num = 0;
 	rx_q->xsk_pool = NULL;
 
 	/* Free DMA regions of consistent memory previously allocated */
 	if (!priv->extend_desc)
-		dma_free_coherent(priv->device, priv->dma_rx_size *
+		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
 				  sizeof(struct dma_desc),
 				  rx_q->dma_rx, rx_q->dma_rx_phy);
 	else
-		dma_free_coherent(priv->device, priv->dma_rx_size *
+		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
 				  sizeof(struct dma_extended_desc),
 				  rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1865,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	page_pool_destroy(rx_q->page_pool);
 }
 
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+				       struct stmmac_dma_conf *dma_conf)
 {
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	u32 queue;
 
 	/* Free RX queue resources */
 	for (queue = 0; queue < rx_count; queue++)
-		__free_dma_rx_desc_resources(priv, queue);
+		__free_dma_rx_desc_resources(priv, dma_conf, queue);
 }
 
 /**
  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
 */
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+					 struct stmmac_dma_conf *dma_conf,
+					 u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 	size_t size;
 	void *addr;
 
 	/* Release the DMA TX socket buffers */
-	dma_free_tx_skbufs(priv, queue);
+	dma_free_tx_skbufs(priv, dma_conf, queue);
 
 	if (priv->extend_desc) {
 		size = sizeof(struct dma_extended_desc);
@@ -1900,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 		addr = tx_q->dma_tx;
 	}
 
-	size *= priv->dma_tx_size;
+	size *= dma_conf->dma_tx_size;
 
 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1908,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	kfree(tx_q->tx_skbuff);
 }
 
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+				       struct stmmac_dma_conf *dma_conf)
 {
 	u32 tx_count = priv->plat->tx_queues_to_use;
 	u32 queue;
 
 	/* Free TX queue resources */
 	for (queue = 0; queue < tx_count; queue++)
-		__free_dma_tx_desc_resources(priv, queue);
+		__free_dma_tx_desc_resources(priv, dma_conf, queue);
 }
 
 /**
  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
 */
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+					 struct stmmac_dma_conf *dma_conf,
+					 u32 queue)
 {
-	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
 	struct page_pool_params pp_params = { 0 };
@@ -1941,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	rx_q->priv_data = priv;
 
 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-	pp_params.pool_size = priv->dma_rx_size;
-	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+	pp_params.pool_size = dma_conf->dma_rx_size;
+	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
 	pp_params.order = ilog2(num_pages);
 	pp_params.nid = dev_to_node(priv->device);
 	pp_params.dev = priv->device;
@@ -1957,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 		return ret;
 	}
 
-	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
 				 sizeof(*rx_q->buf_pool),
 				 GFP_KERNEL);
 	if (!rx_q->buf_pool)
@@ -1965,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 
 	if (priv->extend_desc) {
 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
-						   priv->dma_rx_size *
+						   dma_conf->dma_rx_size *
 						   sizeof(struct dma_extended_desc),
 						   &rx_q->dma_rx_phy,
 						   GFP_KERNEL);
@@ -1974,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 
 	} else {
 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
-						  priv->dma_rx_size *
+						  dma_conf->dma_rx_size *
 						  sizeof(struct dma_desc),
 						  &rx_q->dma_rx_phy,
 						  GFP_KERNEL);
@@ -1999,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	return 0;
 }
 
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+				       struct stmmac_dma_conf *dma_conf)
 {
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	u32 queue;
@@ -2007,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 
 	/* RX queues buffers and DMA */
 	for (queue = 0; queue < rx_count; queue++) {
-		ret = __alloc_dma_rx_desc_resources(priv, queue);
+		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
 		if (ret)
 			goto err_dma;
 	}
@@ -2015,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 	return 0;
 
 err_dma:
-	free_dma_rx_desc_resources(priv);
+	free_dma_rx_desc_resources(priv, dma_conf);
 
 	return ret;
 }
@@ -2023,28 +2072,31 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 /**
  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
 */
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+					 struct stmmac_dma_conf *dma_conf,
+					 u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
 	size_t size;
 	void *addr;
 
 	tx_q->queue_index = queue;
 	tx_q->priv_data = priv;
 
-	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
 				      sizeof(*tx_q->tx_skbuff_dma),
 				      GFP_KERNEL);
 	if (!tx_q->tx_skbuff_dma)
 		return -ENOMEM;
 
-	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
 				  sizeof(struct sk_buff *),
 				  GFP_KERNEL);
 	if (!tx_q->tx_skbuff)
@@ -2057,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	else
 		size = sizeof(struct dma_desc);
 
-	size *= priv->dma_tx_size;
+	size *= dma_conf->dma_tx_size;
 
 	addr = dma_alloc_coherent(priv->device, size,
 				  &tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2074,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 	return 0;
 }
 
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+				       struct stmmac_dma_conf *dma_conf)
 {
 	u32 tx_count = priv->plat->tx_queues_to_use;
 	u32 queue;
@@ -2082,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 
 	/* TX queues buffers and DMA */
 	for (queue = 0; queue < tx_count; queue++) {
-		ret = __alloc_dma_tx_desc_resources(priv, queue);
+		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
 		if (ret)
 			goto err_dma;
 	}
@@ -2090,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 	return 0;
 
 err_dma:
-	free_dma_tx_desc_resources(priv);
+	free_dma_tx_desc_resources(priv, dma_conf);
 
 	return ret;
 }
 
 /**
  * alloc_dma_desc_resources - alloc TX/RX resources.
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
 */
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf)
 {
 	/* RX Allocation */
-	int ret = alloc_dma_rx_desc_resources(priv);
+	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
 
 	if (ret)
 		return ret;
 
-	ret = alloc_dma_tx_desc_resources(priv);
+	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
 
 	return ret;
 }
@@ -2118,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 /**
  * free_dma_desc_resources - free dma desc resources
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
 */
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+				    struct stmmac_dma_conf *dma_conf)
 {
 	/* Release the DMA TX socket buffers */
-	free_dma_tx_desc_resources(priv);
+	free_dma_tx_desc_resources(priv, dma_conf);
 
 	/* Release the DMA RX socket buffers later
 	 * to ensure all pending XDP_TX buffers are returned.
 	 */
-	free_dma_rx_desc_resources(priv);
+	free_dma_rx_desc_resources(priv, dma_conf);
 }
 
 /**
@@ -2301,7 +2358,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
 	/* configure all channels */
 	for (chan = 0; chan < rx_channels_count; chan++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
 		u32 buf_size;
 
 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2316,7 +2373,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 				      chan);
 		} else {
 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
-					      priv->dma_buf_sz,
+					      priv->dma_conf.dma_buf_sz,
 					      chan);
 		}
 	}
@@ -2332,7 +2389,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
 	unsigned int entry = tx_q->cur_tx;
 	struct dma_desc *tx_desc = NULL;
@@ -2407,7 +2464,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
 
 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
 
-		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
 		entry = tx_q->cur_tx;
 	}
@@ -2448,7 +2505,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
 */
 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
 	unsigned int entry, xmits = 0, count = 0;
@@ -2461,7 +2518,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 	entry = tx_q->dirty_tx;
 
 	/* Try to clean all TX complete frame in 1 shot */
-	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
 		struct xdp_frame *xdpf;
 		struct sk_buff *skb;
 		struct dma_desc *p;
@@ -2563,7 +2620,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 
 		stmmac_release_tx_desc(priv, p, priv->mode);
 
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
 	}
 	tx_q->dirty_tx = entry;
@@ -2628,17 +2685,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 */
 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
-	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
 
 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
 
 	stmmac_stop_tx_dma(priv, chan);
-	dma_free_tx_skbufs(priv, chan);
-	stmmac_clear_tx_descriptors(priv, chan);
-	tx_q->dirty_tx = 0;
-	tx_q->cur_tx = 0;
-	tx_q->mss = 0;
-	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+	stmmac_reset_tx_queue(priv, chan);
 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
 			    tx_q->dma_tx_phy, chan);
 	stmmac_start_tx_dma(priv, chan);
...@@ -2698,8 +2752,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) ...@@ -2698,8 +2752,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{ {
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir); &priv->xstats, chan, dir);
struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan]; struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi; struct napi_struct *rx_napi;
struct napi_struct *tx_napi; struct napi_struct *tx_napi;
...@@ -2865,7 +2919,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) ...@@ -2865,7 +2919,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA RX Channel Configuration */ /* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) { for (chan = 0; chan < rx_channels_count; chan++) {
rx_q = &priv->rx_queue[chan]; rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan); rx_q->dma_rx_phy, chan);
...@@ -2879,7 +2933,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) ...@@ -2879,7 +2933,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA TX Channel Configuration */ /* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) { for (chan = 0; chan < tx_channels_count; chan++) {
tx_q = &priv->tx_queue[chan]; tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan); tx_q->dma_tx_phy, chan);
...@@ -2894,7 +2948,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) ...@@ -2894,7 +2948,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer, hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
...@@ -2944,7 +2998,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv) ...@@ -2944,7 +2998,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
u32 chan; u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) { for (chan = 0; chan < tx_channel_count; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
...@@ -2966,12 +3020,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv) ...@@ -2966,12 +3020,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */ /* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++) for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr, stmmac_set_tx_ring_len(priv, priv->ioaddr,
(priv->dma_tx_size - 1), chan); (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */ /* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++) for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr, stmmac_set_rx_ring_len(priv, priv->ioaddr,
(priv->dma_rx_size - 1), chan); (priv->dma_conf.dma_rx_size - 1), chan);
} }
/** /**
...@@ -3306,7 +3360,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) ...@@ -3306,7 +3360,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Enable TSO */ /* Enable TSO */
if (priv->tso) { if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) { for (chan = 0; chan < tx_cnt; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */ /* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL) if (tx_q->tbs & STMMAC_TBS_AVAIL)
...@@ -3328,7 +3382,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) ...@@ -3328,7 +3382,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* TBS */ /* TBS */
for (chan = 0; chan < tx_cnt; chan++) { for (chan = 0; chan < tx_cnt; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL; int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
...@@ -3372,7 +3426,7 @@ static void stmmac_free_irq(struct net_device *dev, ...@@ -3372,7 +3426,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) { for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) { if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL); irq_set_affinity_hint(priv->tx_irq[j], NULL);
free_irq(priv->tx_irq[j], &priv->tx_queue[j]); free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
} }
} }
irq_idx = priv->plat->rx_queues_to_use; irq_idx = priv->plat->rx_queues_to_use;
...@@ -3381,7 +3435,7 @@ static void stmmac_free_irq(struct net_device *dev, ...@@ -3381,7 +3435,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) { for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) { if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL); irq_set_affinity_hint(priv->rx_irq[j], NULL);
free_irq(priv->rx_irq[j], &priv->rx_queue[j]); free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
} }
} }
...@@ -3516,7 +3570,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) ...@@ -3516,7 +3570,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i], ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx, stmmac_msi_intr_rx,
0, int_name, &priv->rx_queue[i]); 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) { if (unlikely(ret < 0)) {
netdev_err(priv->dev, netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n", "%s: alloc rx-%d MSI %d (error: %d)\n",
...@@ -3541,7 +3595,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) ...@@ -3541,7 +3595,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i], ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx, stmmac_msi_intr_tx,
0, int_name, &priv->tx_queue[i]); 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) { if (unlikely(ret < 0)) {
netdev_err(priv->dev, netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n", "%s: alloc tx-%d MSI %d (error: %d)\n",
...@@ -3628,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev) ...@@ -3628,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev)
} }
/** /**
* stmmac_open - open entry point of the driver * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
* @priv: driver private structure
* @mtu: MTU to setup the dma queue and buf with
* Description: Allocate and generate a dma_conf based on the provided MTU.
* Allocate the Tx/Rx DMA queues and initialize them.
* Return value:
* the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
*/
static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
{
struct stmmac_dma_conf *dma_conf;
int chan, bfsize, ret;
dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
if (!dma_conf) {
netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
__func__);
return ERR_PTR(-ENOMEM);
}
bfsize = stmmac_set_16kib_bfsize(priv, mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(mtu, 0);
dma_conf->dma_buf_sz = bfsize;
/* Choose the tx/rx sizes from the ones already defined in the
* priv struct, if set.
*/
dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
if (!dma_conf->dma_tx_size)
dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
if (!dma_conf->dma_rx_size)
dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
}
ret = alloc_dma_desc_resources(priv, dma_conf);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto alloc_error;
}
ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
__func__);
goto init_error;
}
return dma_conf;
init_error:
free_dma_desc_resources(priv, dma_conf);
alloc_error:
kfree(dma_conf);
return ERR_PTR(ret);
}
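The kernel-doc above relies on the ERR_PTR() convention: a single pointer return that can also carry a negative errno, so callers never need a separate status variable. A minimal, self-contained sketch of the idiom (struct foo and its helpers are hypothetical, not part of this patch):

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
        int data;
};

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */
        return f;
}

static int foo_use(void)
{
        struct foo *f = foo_alloc();

        if (IS_ERR(f))
                return PTR_ERR(f);              /* decode it back to a plain errno */

        f->data = 1;                            /* ... use the object ... */
        kfree(f);
        return 0;
}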
/**
* __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure. * @dev : pointer to the device structure.
* @dma_conf : structure to take the dma data
* Description: * Description:
* This function is the open entry point of the driver. * This function is the open entry point of the driver.
* Return value: * Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h * 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure. * file on failure.
*/ */
static int stmmac_open(struct net_device *dev) static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface; int mode = priv->plat->phy_interface;
int bfsize = 0;
u32 chan; u32 chan;
int ret; int ret;
...@@ -3665,45 +3793,12 @@ static int stmmac_open(struct net_device *dev) ...@@ -3665,45 +3793,12 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc; priv->xstats.threshold = tc;
bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
priv->rx_copybreak = STMMAC_RX_COPYBREAK; priv->rx_copybreak = STMMAC_RX_COPYBREAK;
if (!priv->dma_tx_size) buf_sz = dma_conf->dma_buf_sz;
priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
if (!priv->dma_rx_size)
priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
/* Earlier check for TBS */ stmmac_reset_queues_param(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
}
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
ret = init_dma_desc_rings(dev, GFP_KERNEL);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
__func__);
goto init_error;
}
ret = stmmac_hw_setup(dev, true); ret = stmmac_hw_setup(dev, true);
if (ret < 0) { if (ret < 0) {
...@@ -3731,18 +3826,32 @@ static int stmmac_open(struct net_device *dev) ...@@ -3731,18 +3826,32 @@ static int stmmac_open(struct net_device *dev)
phylink_stop(priv->phylink); phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer); hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev); stmmac_hw_teardown(dev);
init_error: init_error:
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
phylink_disconnect_phy(priv->phylink); phylink_disconnect_phy(priv->phylink);
init_phy_error: init_phy_error:
pm_runtime_put(priv->device); pm_runtime_put(priv->device);
return ret; return ret;
} }
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct stmmac_dma_conf *dma_conf;
int ret;
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
ret = __stmmac_open(dev, dma_conf);
kfree(dma_conf);
return ret;
}
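Note the ownership rule this split establishes: __stmmac_open() commits the staged configuration with a plain structure copy (the memcpy() into priv->dma_conf above), so stmmac_open() can kfree() the staging buffer unconditionally, whether the open succeeded or not. A condensed sketch of the stage-then-commit shape, with hypothetical names standing in for stmmac_dma_conf and stmmac_priv:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

struct cfg { int ring_size; };          /* stand-in for stmmac_dma_conf */
struct priv { struct cfg live; };       /* stand-in for stmmac_priv */

static struct cfg *build_cfg(int ring_size)
{
        struct cfg *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return ERR_PTR(-ENOMEM);
        c->ring_size = ring_size;       /* do all the fallible work here */
        return c;
}

static int reconfigure(struct priv *p, int ring_size)
{
        struct cfg *staged = build_cfg(ring_size);

        if (IS_ERR(staged))
                return PTR_ERR(staged); /* live configuration untouched */

        memcpy(&p->live, staged, sizeof(p->live));      /* commit by value */
        kfree(staged);                  /* staging copy is now redundant */
        return 0;
}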
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{ {
set_bit(__FPE_REMOVING, &priv->fpe_task_state); set_bit(__FPE_REMOVING, &priv->fpe_task_state);
...@@ -3764,8 +3873,6 @@ static int stmmac_release(struct net_device *dev) ...@@ -3764,8 +3873,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
u32 chan; u32 chan;
netif_tx_disable(dev);
if (device_may_wakeup(priv->device)) if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false); phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */ /* Stop and disconnect the PHY */
...@@ -3775,7 +3882,9 @@ static int stmmac_release(struct net_device *dev) ...@@ -3775,7 +3882,9 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv); stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer); hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
netif_tx_disable(dev);
/* Free the IRQ lines */ /* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
...@@ -3789,7 +3898,7 @@ static int stmmac_release(struct net_device *dev) ...@@ -3789,7 +3898,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv); stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */ /* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */ /* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false); stmmac_mac_set(priv, priv->ioaddr, false);
...@@ -3833,7 +3942,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, ...@@ -3833,7 +3942,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false; return false;
stmmac_set_tx_owner(priv, p); stmmac_set_tx_owner(priv, p);
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true; return true;
} }
...@@ -3851,7 +3960,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, ...@@ -3851,7 +3960,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue) int total_len, bool last_segment, u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc; struct dma_desc *desc;
u32 buff_size; u32 buff_size;
int tmp_len; int tmp_len;
...@@ -3862,7 +3971,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, ...@@ -3862,7 +3971,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
dma_addr_t curr_addr; dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
priv->dma_tx_size); priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL) if (tx_q->tbs & STMMAC_TBS_AVAIL)
...@@ -3890,7 +3999,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, ...@@ -3890,7 +3999,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size; int desc_size;
if (likely(priv->extend_desc)) if (likely(priv->extend_desc))
...@@ -3952,7 +4061,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3952,7 +4061,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des; dma_addr_t des;
int i; int i;
tx_q = &priv->tx_queue[queue]; tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx; first_tx = tx_q->cur_tx;
/* Compute header lengths */ /* Compute header lengths */
...@@ -3992,7 +4101,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3992,7 +4101,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss); stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss; tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
priv->dma_tx_size); priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
} }
...@@ -4104,7 +4213,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -4104,7 +4213,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's * ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor. * called and stmmac_tx_clean may clean up to this descriptor.
*/ */
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
...@@ -4192,7 +4301,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -4192,7 +4301,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, first_tx; int entry, first_tx;
dma_addr_t des; dma_addr_t des;
tx_q = &priv->tx_queue[queue]; tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx; first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
...@@ -4255,7 +4364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -4255,7 +4364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag); int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1)); bool last_segment = (i == (nfrags - 1));
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]); WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc)) if (likely(priv->extend_desc))
...@@ -4326,7 +4435,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -4326,7 +4435,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's * ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor. * called and stmmac_tx_clean may clean up to this descriptor.
*/ */
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry; tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) { if (netif_msg_pktdata(priv)) {
...@@ -4441,7 +4550,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) ...@@ -4441,7 +4550,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/ */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue); int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx; unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
...@@ -4495,7 +4604,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) ...@@ -4495,7 +4604,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb(); dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd); stmmac_set_rx_owner(priv, p, use_rx_wd);
entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
} }
rx_q->dirty_rx = entry; rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy + rx_q->rx_tail_addr = rx_q->dma_rx_phy +
...@@ -4523,12 +4632,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, ...@@ -4523,12 +4632,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
/* First descriptor, not last descriptor and not split header */ /* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls) if (status & rx_not_ls)
return priv->dma_buf_sz; return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe); plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */ /* First descriptor and last descriptor and not split header */
return min_t(unsigned int, priv->dma_buf_sz, plen); return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
} }
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
...@@ -4544,7 +4653,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, ...@@ -4544,7 +4653,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
/* Not last descriptor */ /* Not last descriptor */
if (status & rx_not_ls) if (status & rx_not_ls)
return priv->dma_buf_sz; return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe); plen = stmmac_get_rx_frame_len(priv, p, coe);
...@@ -4555,7 +4664,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, ...@@ -4555,7 +4664,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map) struct xdp_frame *xdpf, bool dma_map)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx; unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc; struct dma_desc *tx_desc;
dma_addr_t dma_addr; dma_addr_t dma_addr;
...@@ -4618,7 +4727,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, ...@@ -4618,7 +4727,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_enable_dma_transmission(priv, priv->ioaddr); stmmac_enable_dma_transmission(priv, priv->ioaddr);
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry; tx_q->cur_tx = entry;
return STMMAC_XDP_TX; return STMMAC_XDP_TX;
...@@ -4792,7 +4901,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, ...@@ -4792,7 +4901,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx; unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL; struct dma_desc *rx_desc = NULL;
bool ret = true; bool ret = true;
...@@ -4835,7 +4944,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) ...@@ -4835,7 +4944,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
dma_wmb(); dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
} }
if (rx_desc) { if (rx_desc) {
...@@ -4850,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) ...@@ -4850,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0; unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue); int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx; unsigned int next_entry = rx_q->cur_rx;
...@@ -4872,7 +4981,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -4872,7 +4981,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc); desc_size = sizeof(struct dma_desc);
} }
stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size); rx_q->dma_rx_phy, desc_size);
} }
while (count < limit) { while (count < limit) {
...@@ -4919,7 +5028,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -4919,7 +5028,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
/* Prefetch the next RX descriptor */ /* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
priv->dma_rx_size); priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx; next_entry = rx_q->cur_rx;
if (priv->extend_desc) if (priv->extend_desc)
...@@ -5040,7 +5149,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5040,7 +5149,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
*/ */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue]; struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0; unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum; int status = 0, coe = priv->hw->rx_csum;
...@@ -5053,7 +5162,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5053,7 +5162,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int buf_sz; int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool); dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) { if (netif_msg_rx_status(priv)) {
void *rx_head; void *rx_head;
...@@ -5067,7 +5176,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5067,7 +5176,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc); desc_size = sizeof(struct dma_desc);
} }
stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size); rx_q->dma_rx_phy, desc_size);
} }
while (count < limit) { while (count < limit) {
...@@ -5111,7 +5220,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5111,7 +5220,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
break; break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
priv->dma_rx_size); priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx; next_entry = rx_q->cur_rx;
if (priv->extend_desc) if (priv->extend_desc)
...@@ -5246,7 +5355,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5246,7 +5355,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
buf1_len, dma_dir); buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len, buf->page, buf->page_offset, buf1_len,
priv->dma_buf_sz); priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */ /* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page); page_pool_release_page(rx_q->page_pool, buf->page);
...@@ -5258,7 +5367,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) ...@@ -5258,7 +5367,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
buf2_len, dma_dir); buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len, buf->sec_page, 0, buf2_len,
priv->dma_buf_sz); priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */ /* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page); page_pool_release_page(rx_q->page_pool, buf->sec_page);
...@@ -5442,18 +5551,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) ...@@ -5442,18 +5551,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size; int txfifosz = priv->plat->tx_fifo_size;
struct stmmac_dma_conf *dma_conf;
const int mtu = new_mtu; const int mtu = new_mtu;
int ret;
if (txfifosz == 0) if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size; txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use; txfifosz /= priv->plat->tx_queues_to_use;
if (netif_running(dev)) {
netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
return -EINVAL; return -EINVAL;
...@@ -5465,8 +5571,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) ...@@ -5465,8 +5571,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL; return -EINVAL;
dev->mtu = mtu; if (netif_running(dev)) {
netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
/* Try to allocate the new DMA conf with the new mtu */
dma_conf = stmmac_setup_dma_desc(priv, mtu);
if (IS_ERR(dma_conf)) {
netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
mtu);
return PTR_ERR(dma_conf);
}
stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
kfree(dma_conf);
if (ret) {
netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
return ret;
}
stmmac_set_rx_mode(dev);
}
dev->mtu = mtu;
netdev_update_features(dev); netdev_update_features(dev);
return 0; return 0;
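The ordering above is the heart of the series: the DMA configuration for the new MTU is fully allocated while the interface is still up, so an allocation failure returns early and leaves the running setup intact; only then is the interface released and reopened with the staged configuration. A hypothetical user-space snippet (not part of the patch) that exercises this path via SIOCSIFMTU, the ioctl behind `ip link set dev <iface> mtu <n>`:

#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Returns 0 on success, -1 on error; needs CAP_NET_ADMIN. */
int set_mtu(const char *ifname, int mtu)
{
        struct ifreq ifr;
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_mtu = mtu;

        /* ends up in the driver's ndo_change_mtu, stmmac_change_mtu() here */
        ret = ioctl(fd, SIOCSIFMTU, &ifr);
        close(fd);
        return ret < 0 ? -1 : 0;
}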
...@@ -5700,11 +5827,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) ...@@ -5700,11 +5827,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{ {
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index; int chan = tx_q->queue_index;
struct stmmac_priv *priv; struct stmmac_priv *priv;
int status; int status;
priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) { if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
...@@ -5730,10 +5859,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) ...@@ -5730,10 +5859,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{ {
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index; int chan = rx_q->queue_index;
struct stmmac_priv *priv; struct stmmac_priv *priv;
priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) { if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
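With the queues now embedded in priv->dma_conf rather than directly in priv, the per-queue MSI handlers above recover their stmmac_priv in two container_of() hops: from the array element to its enclosing dma_conf (note the array-index member form), then from dma_conf to priv. A minimal sketch of the construction, with hypothetical types:

#include <linux/container_of.h>

struct inner { int id; };
struct mid { struct inner queue[8]; };
struct outer { long other_state; struct mid conf; };

static struct outer *outer_from_queue(struct inner *q, int idx)
{
        /* hop 1: array element -> enclosing struct, array-index member form */
        struct mid *m = container_of(q, struct mid, queue[idx]);

        /* hop 2: embedded struct -> its container */
        return container_of(m, struct outer, conf);
}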
...@@ -5764,10 +5895,10 @@ static void stmmac_poll_controller(struct net_device *dev) ...@@ -5764,10 +5895,10 @@ static void stmmac_poll_controller(struct net_device *dev)
if (priv->plat->multi_msi_en) { if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++) for (i = 0; i < priv->plat->rx_queues_to_use; i++)
stmmac_msi_intr_rx(0, &priv->rx_queue[i]); stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++) for (i = 0; i < priv->plat->tx_queues_to_use; i++)
stmmac_msi_intr_tx(0, &priv->tx_queue[i]); stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else { } else {
disable_irq(dev->irq); disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev); stmmac_interrupt(dev->irq, dev);
...@@ -5946,34 +6077,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) ...@@ -5946,34 +6077,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
return 0; return 0;
for (queue = 0; queue < rx_count; queue++) { for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue); seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) { if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n"); seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx, sysfs_display_ring((void *)rx_q->dma_erx,
priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else { } else {
seq_printf(seq, "Descriptor ring:\n"); seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx, sysfs_display_ring((void *)rx_q->dma_rx,
priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
} }
} }
for (queue = 0; queue < tx_count; queue++) { for (queue = 0; queue < tx_count; queue++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue); seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) { if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n"); seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx, sysfs_display_ring((void *)tx_q->dma_etx,
priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n"); seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx, sysfs_display_ring((void *)tx_q->dma_tx,
priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
} }
} }
...@@ -6307,31 +6438,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6307,31 +6438,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue); stmmac_stop_rx_dma(priv, queue);
__free_dma_rx_desc_resources(priv, queue); __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
} }
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue]; struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags; unsigned long flags;
u32 buf_size; u32 buf_size;
int ret; int ret;
ret = __alloc_dma_rx_desc_resources(priv, queue); ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n"); netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return; return;
} }
ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) { if (ret) {
__free_dma_rx_desc_resources(priv, queue); __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n"); netdev_err(priv->dev, "Failed to init RX desc.\n");
return; return;
} }
stmmac_clear_rx_descriptors(priv, queue); stmmac_reset_rx_queue(priv, queue);
stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index); rx_q->dma_rx_phy, rx_q->queue_index);
...@@ -6348,7 +6480,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6348,7 +6480,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
rx_q->queue_index); rx_q->queue_index);
} else { } else {
stmmac_set_dma_bfsize(priv, priv->ioaddr, stmmac_set_dma_bfsize(priv, priv->ioaddr,
priv->dma_buf_sz, priv->dma_conf.dma_buf_sz,
rx_q->queue_index); rx_q->queue_index);
} }
...@@ -6369,30 +6501,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6369,30 +6501,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue); stmmac_stop_tx_dma(priv, queue);
__free_dma_tx_desc_resources(priv, queue); __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
} }
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue]; struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags; unsigned long flags;
int ret; int ret;
ret = __alloc_dma_tx_desc_resources(priv, queue); ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n"); netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return; return;
} }
ret = __init_dma_tx_desc_rings(priv, queue); ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
__free_dma_tx_desc_resources(priv, queue); __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n"); netdev_err(priv->dev, "Failed to init TX desc.\n");
return; return;
} }
stmmac_clear_tx_descriptors(priv, queue); stmmac_reset_tx_queue(priv, queue);
stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index); tx_q->dma_tx_phy, tx_q->queue_index);
...@@ -6420,7 +6553,7 @@ void stmmac_xdp_release(struct net_device *dev) ...@@ -6420,7 +6553,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_disable_all_queues(priv); stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer); hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
/* Free the IRQ lines */ /* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
...@@ -6429,7 +6562,7 @@ void stmmac_xdp_release(struct net_device *dev) ...@@ -6429,7 +6562,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_stop_all_dma(priv); stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */ /* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */ /* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false); stmmac_mac_set(priv, priv->ioaddr, false);
...@@ -6454,14 +6587,14 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6454,14 +6587,14 @@ int stmmac_xdp_open(struct net_device *dev)
u32 chan; u32 chan;
int ret; int ret;
ret = alloc_dma_desc_resources(priv); ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) { if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n", netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__); __func__);
goto dma_desc_error; goto dma_desc_error;
} }
ret = init_dma_desc_rings(dev, GFP_KERNEL); ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n", netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__); __func__);
...@@ -6479,7 +6612,7 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6479,7 +6612,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA RX Channel Configuration */ /* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) { for (chan = 0; chan < rx_cnt; chan++) {
rx_q = &priv->rx_queue[chan]; rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan); rx_q->dma_rx_phy, chan);
...@@ -6497,7 +6630,7 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6497,7 +6630,7 @@ int stmmac_xdp_open(struct net_device *dev)
rx_q->queue_index); rx_q->queue_index);
} else { } else {
stmmac_set_dma_bfsize(priv, priv->ioaddr, stmmac_set_dma_bfsize(priv, priv->ioaddr,
priv->dma_buf_sz, priv->dma_conf.dma_buf_sz,
rx_q->queue_index); rx_q->queue_index);
} }
...@@ -6506,7 +6639,7 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6506,7 +6639,7 @@ int stmmac_xdp_open(struct net_device *dev)
/* DMA TX Channel Configuration */ /* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) { for (chan = 0; chan < tx_cnt; chan++) {
tx_q = &priv->tx_queue[chan]; tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan); tx_q->dma_tx_phy, chan);
...@@ -6539,11 +6672,11 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6539,11 +6672,11 @@ int stmmac_xdp_open(struct net_device *dev)
irq_error: irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer); hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev); stmmac_hw_teardown(dev);
init_error: init_error:
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error: dma_desc_error:
return ret; return ret;
} }
...@@ -6566,8 +6699,8 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) ...@@ -6566,8 +6699,8 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
queue >= priv->plat->tx_queues_to_use) queue >= priv->plat->tx_queues_to_use)
return -EINVAL; return -EINVAL;
rx_q = &priv->rx_queue[queue]; rx_q = &priv->dma_conf.rx_queue[queue];
tx_q = &priv->tx_queue[queue]; tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue]; ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool) if (!rx_q->xsk_pool && !tx_q->xsk_pool)
...@@ -6822,8 +6955,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) ...@@ -6822,8 +6955,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
if (netif_running(dev)) if (netif_running(dev))
stmmac_release(dev); stmmac_release(dev);
priv->dma_rx_size = rx_size; priv->dma_conf.dma_rx_size = rx_size;
priv->dma_tx_size = tx_size; priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev)) if (netif_running(dev))
ret = stmmac_open(dev); ret = stmmac_open(dev);
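The ring sizes written into priv->dma_conf here persist across the reopen because stmmac_setup_dma_desc() copies dma_tx_size/dma_rx_size from the current priv->dma_conf when staging a new configuration (see above), falling back to the defaults only when they are zero. For completeness, a hypothetical user-space sketch of the ethtool ioctl that reaches stmmac_reinit_ringparam() via the driver's set_ringparam hook, equivalent to `ethtool -G <iface> rx <n> tx <n>`:

#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Returns 0 on success, -1 on error; needs CAP_NET_ADMIN. */
int set_ring_sizes(const char *ifname, unsigned int rx, unsigned int tx)
{
        struct ethtool_ringparam erp = {
                .cmd = ETHTOOL_SRINGPARAM,
                .rx_pending = rx,
                .tx_pending = tx,
        };
        struct ifreq ifr;
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&erp;

        ret = ioctl(fd, SIOCETHTOOL, &ifr);
        close(fd);
        return ret < 0 ? -1 : 0;
}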
...@@ -7270,7 +7403,7 @@ int stmmac_suspend(struct device *dev) ...@@ -7270,7 +7403,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv); stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer); hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) { if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false; priv->tx_path_in_lpi_mode = false;
...@@ -7319,32 +7452,40 @@ int stmmac_suspend(struct device *dev) ...@@ -7319,32 +7452,40 @@ int stmmac_suspend(struct device *dev)
} }
EXPORT_SYMBOL_GPL(stmmac_suspend); EXPORT_SYMBOL_GPL(stmmac_suspend);
/** static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
*/
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{ {
u32 rx_cnt = priv->plat->rx_queues_to_use; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
for (queue = 0; queue < rx_cnt; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
rx_q->cur_rx = 0; rx_q->cur_rx = 0;
rx_q->dirty_rx = 0; rx_q->dirty_rx = 0;
} }
for (queue = 0; queue < tx_cnt; queue++) { static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
tx_q->cur_tx = 0; tx_q->cur_tx = 0;
tx_q->dirty_tx = 0; tx_q->dirty_tx = 0;
tx_q->mss = 0; tx_q->mss = 0;
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
} }
/**
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
*/
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
for (queue = 0; queue < rx_cnt; queue++)
stmmac_reset_rx_queue(priv, queue);
for (queue = 0; queue < tx_cnt; queue++)
stmmac_reset_tx_queue(priv, queue);
} }
/** /**
...@@ -7404,7 +7545,7 @@ int stmmac_resume(struct device *dev) ...@@ -7404,7 +7545,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv); stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv); stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv); stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false); stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv); stmmac_init_coalesce(priv);
......
...@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) ...@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
struct stmmac_channel *ch = &priv->channel[i]; struct stmmac_channel *ch = &priv->channel[i];
u32 tail; u32 tail;
tail = priv->rx_queue[i].dma_rx_phy + tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
(priv->dma_rx_size * sizeof(struct dma_desc)); (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i); stmmac_start_rx(priv, priv->ioaddr, i);
...@@ -1680,7 +1680,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) ...@@ -1680,7 +1680,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{ {
struct stmmac_packet_attrs attr = { }; struct stmmac_packet_attrs attr = { };
int size = priv->dma_buf_sz; int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr; attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN; attr.max_size = size - ETH_FCS_LEN;
...@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv) ...@@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
/* Find first TBS enabled Queue, if any */ /* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) for (i = 0; i < priv->plat->tx_queues_to_use; i++)
if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break; break;
if (i >= priv->plat->tx_queues_to_use) if (i >= priv->plat->tx_queues_to_use)
......
...@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv, ...@@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use) if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL; return -EINVAL;
if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL; return -EINVAL;
if (qopt->enable) if (qopt->enable)
priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else else
priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n", netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue); qopt->enable ? "enabled" : "disabled", qopt->queue);
......