Commit ba39b344 authored by Christian Marangi, committed by Jakub Kicinski

net: ethernet: stmicro: stmmac: generate stmmac dma conf before open

Rework the driver to generate the stmmac dma_conf before stmmac_open.
This permits a function to first check if it's possible to allocate a
new dma_config and then pass it directly to __stmmac_open and "open" the
interface with the new configuration.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8531c808
...@@ -1223,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) ...@@ -1223,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
return 0; return 0;
} }
static void stmmac_display_rx_rings(struct stmmac_priv *priv) static void stmmac_display_rx_rings(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 rx_cnt = priv->plat->rx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size; unsigned int desc_size;
...@@ -1232,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) ...@@ -1232,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
/* Display RX rings */ /* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) { for (queue = 0; queue < rx_cnt; queue++) {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue); pr_info("\tRX Queue %u rings\n", queue);
...@@ -1245,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) ...@@ -1245,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
} }
/* Display RX ring */ /* Display RX ring */
stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true, stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size); rx_q->dma_rx_phy, desc_size);
} }
} }
static void stmmac_display_tx_rings(struct stmmac_priv *priv) static void stmmac_display_tx_rings(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 tx_cnt = priv->plat->tx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size; unsigned int desc_size;
...@@ -1259,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) ...@@ -1259,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
/* Display TX rings */ /* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) { for (queue = 0; queue < tx_cnt; queue++) {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue); pr_info("\tTX Queue %d rings\n", queue);
...@@ -1274,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) ...@@ -1274,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
desc_size = sizeof(struct dma_desc); desc_size = sizeof(struct dma_desc);
} }
stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false, stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size); tx_q->dma_tx_phy, desc_size);
} }
} }
static void stmmac_display_rings(struct stmmac_priv *priv) static void stmmac_display_rings(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
/* Display RX ring */ /* Display RX ring */
stmmac_display_rx_rings(priv); stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */ /* Display TX ring */
stmmac_display_tx_rings(priv); stmmac_display_tx_rings(priv, dma_conf);
} }
static int stmmac_set_bfsize(int mtu, int bufsize) static int stmmac_set_bfsize(int mtu, int bufsize)
...@@ -1309,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize) ...@@ -1309,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
/** /**
* stmmac_clear_rx_descriptors - clear RX descriptors * stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure * @priv: driver private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
* Description: this function is called to clear the RX descriptors * Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used. * in case of both basic and extended descriptors are used.
*/ */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i; int i;
/* Clear the RX descriptors */ /* Clear the RX descriptors */
for (i = 0; i < priv->dma_conf.dma_rx_size; i++) for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc) if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode, priv->use_riwt, priv->mode,
(i == priv->dma_conf.dma_rx_size - 1), (i == dma_conf->dma_rx_size - 1),
priv->dma_conf.dma_buf_sz); dma_conf->dma_buf_sz);
else else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode, priv->use_riwt, priv->mode,
(i == priv->dma_conf.dma_rx_size - 1), (i == dma_conf->dma_rx_size - 1),
priv->dma_conf.dma_buf_sz); dma_conf->dma_buf_sz);
} }
/** /**
* stmmac_clear_tx_descriptors - clear tx descriptors * stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure * @priv: driver private structure
* @dma_conf: structure to take the dma data
* @queue: TX queue index. * @queue: TX queue index.
* Description: this function is called to clear the TX descriptors * Description: this function is called to clear the TX descriptors
* in case of both basic and extended descriptors are used. * in case of both basic and extended descriptors are used.
*/ */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i; int i;
/* Clear the TX descriptors */ /* Clear the TX descriptors */
for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { for (i = 0; i < dma_conf->dma_tx_size; i++) {
int last = (i == (priv->dma_conf.dma_tx_size - 1)); int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p; struct dma_desc *p;
if (priv->extend_desc) if (priv->extend_desc)
...@@ -1363,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) ...@@ -1363,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/** /**
* stmmac_clear_descriptors - clear descriptors * stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure * @priv: driver private structure
* @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors * Description: this function is called to clear the TX and RX descriptors
* in case of both basic and extended descriptors are used. * in case of both basic and extended descriptors are used.
*/ */
static void stmmac_clear_descriptors(struct stmmac_priv *priv) static void stmmac_clear_descriptors(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 rx_queue_cnt = priv->plat->rx_queues_to_use; u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use; u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
...@@ -1374,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) ...@@ -1374,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
/* Clear the RX descriptors */ /* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++) for (queue = 0; queue < rx_queue_cnt; queue++)
stmmac_clear_rx_descriptors(priv, queue); stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */ /* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++) for (queue = 0; queue < tx_queue_cnt; queue++)
stmmac_clear_tx_descriptors(priv, queue); stmmac_clear_tx_descriptors(priv, dma_conf, queue);
} }
/** /**
* stmmac_init_rx_buffers - init the RX descriptor buffer. * stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure * @priv: driver private structure
* @dma_conf: structure to take the dma data
* @p: descriptor pointer * @p: descriptor pointer
* @i: descriptor index * @i: descriptor index
* @flags: gfp flag * @flags: gfp flag
...@@ -1391,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) ...@@ -1391,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
* Description: this function is called to allocate a receive buffer, perform * Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor. * the DMA mapping and init the descriptor.
*/ */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
struct dma_desc *p,
int i, gfp_t flags, u32 queue) int i, gfp_t flags, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
...@@ -1423,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, ...@@ -1423,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr); stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p); stmmac_init_desc3(priv, p);
return 0; return 0;
...@@ -1432,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, ...@@ -1432,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
/** /**
* stmmac_free_rx_buffer - free RX dma buffers * stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure * @priv: private structure
* @queue: RX queue index * @rx_q: RX queue
* @i: buffer index. * @i: buffer index.
*/ */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
struct stmmac_rx_queue *rx_q,
int i)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page) if (buf->page)
...@@ -1452,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) ...@@ -1452,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/** /**
* stmmac_free_tx_buffer - free RX dma buffers * stmmac_free_tx_buffer - free RX dma buffers
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
* @i: buffer index. * @i: buffer index.
*/ */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue, int i)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf && if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
...@@ -1496,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) ...@@ -1496,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/** /**
* dma_free_rx_skbufs - free RX dma buffers * dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
*/ */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) static void dma_free_rx_skbufs(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i; int i;
for (i = 0; i < priv->dma_conf.dma_rx_size; i++) for (i = 0; i < dma_conf->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i); stmmac_free_rx_buffer(priv, rx_q, i);
} }
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
gfp_t flags) struct stmmac_dma_conf *dma_conf,
u32 queue, gfp_t flags)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i; int i;
for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p; struct dma_desc *p;
int ret; int ret;
...@@ -1521,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, ...@@ -1521,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
else else
p = rx_q->dma_rx + i; p = rx_q->dma_rx + i;
ret = stmmac_init_rx_buffers(priv, p, i, flags, ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue); queue);
if (ret) if (ret)
return ret; return ret;
...@@ -1535,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, ...@@ -1535,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
/** /**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
*/ */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i; int i;
for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp) if (!buf->xdp)
...@@ -1553,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) ...@@ -1553,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
} }
} }
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i; int i;
for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf; struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct dma_desc *p; struct dma_desc *p;
...@@ -1593,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q ...@@ -1593,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
/** /**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure * @priv: driver private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
* @flags: gfp flag. * @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors * Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring * and allocates the socket buffers. It supports the chained and ring
* modes. * modes.
*/ */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue, gfp_t flags)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret; int ret;
netif_dbg(priv, probe, priv->dev, netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__, "(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy); (u32)rx_q->dma_rx_phy);
stmmac_clear_rx_descriptors(priv, queue); stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
...@@ -1635,9 +1666,9 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f ...@@ -1635,9 +1666,9 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
/* RX XDP ZC buffer pool may not be populated, e.g. /* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only. * xdpsock TX-only.
*/ */
stmmac_alloc_rx_buffers_zc(priv, queue); stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else { } else {
ret = stmmac_alloc_rx_buffers(priv, queue, flags); ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0) if (ret < 0)
return -ENOMEM; return -ENOMEM;
} }
...@@ -1647,17 +1678,19 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f ...@@ -1647,17 +1678,19 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
if (priv->extend_desc) if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx, stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy, rx_q->dma_rx_phy,
priv->dma_conf.dma_rx_size, 1); dma_conf->dma_rx_size, 1);
else else
stmmac_mode_init(priv, rx_q->dma_rx, stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy, rx_q->dma_rx_phy,
priv->dma_conf.dma_rx_size, 0); dma_conf->dma_rx_size, 0);
} }
return 0; return 0;
} }
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) static int init_dma_rx_desc_rings(struct net_device *dev,
struct stmmac_dma_conf *dma_conf,
gfp_t flags)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use; u32 rx_count = priv->plat->rx_queues_to_use;
...@@ -1669,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1669,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"SKB addresses:\nskb\t\tskb data\tdma data\n"); "SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) { for (queue = 0; queue < rx_count; queue++) {
ret = __init_dma_rx_desc_rings(priv, queue, flags); ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret) if (ret)
goto err_init_rx_buffers; goto err_init_rx_buffers;
} }
...@@ -1678,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1678,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
err_init_rx_buffers: err_init_rx_buffers:
while (queue >= 0) { while (queue >= 0) {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool) if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue); dma_free_rx_xskbufs(priv, dma_conf, queue);
else else
dma_free_rx_skbufs(priv, queue); dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0; rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL; rx_q->xsk_pool = NULL;
...@@ -1697,14 +1730,17 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1697,14 +1730,17 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
/** /**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure * @priv: driver private structure
* @queue : TX queue index * @dma_conf: structure to take the dma data
* @queue: TX queue index
* Description: this function initializes the DMA TX descriptors * Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring * and allocates the socket buffers. It supports the chained and ring
* modes. * modes.
*/ */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i; int i;
netif_dbg(priv, probe, priv->dev, netif_dbg(priv, probe, priv->dev,
...@@ -1716,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) ...@@ -1716,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx, stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy, tx_q->dma_tx_phy,
priv->dma_conf.dma_tx_size, 1); dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx, stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, tx_q->dma_tx_phy,
priv->dma_conf.dma_tx_size, 0); dma_conf->dma_tx_size, 0);
} }
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p; struct dma_desc *p;
if (priv->extend_desc) if (priv->extend_desc)
...@@ -1747,7 +1783,8 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) ...@@ -1747,7 +1783,8 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
return 0; return 0;
} }
static int init_dma_tx_desc_rings(struct net_device *dev) static int init_dma_tx_desc_rings(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt; u32 tx_queue_cnt;
...@@ -1756,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) ...@@ -1756,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_queue_cnt = priv->plat->tx_queues_to_use; tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++) for (queue = 0; queue < tx_queue_cnt; queue++)
__init_dma_tx_desc_rings(priv, queue); __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0; return 0;
} }
...@@ -1764,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev) ...@@ -1764,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/** /**
* init_dma_desc_rings - init the RX/TX descriptor rings * init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure * @dev: net device structure
* @dma_conf: structure to take the dma data
* @flags: gfp flag. * @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors * Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring * and allocates the socket buffers. It supports the chained and ring
* modes. * modes.
*/ */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) static int init_dma_desc_rings(struct net_device *dev,
struct stmmac_dma_conf *dma_conf,
gfp_t flags)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int ret; int ret;
ret = init_dma_rx_desc_rings(dev, flags); ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret) if (ret)
return ret; return ret;
ret = init_dma_tx_desc_rings(dev); ret = init_dma_tx_desc_rings(dev, dma_conf);
stmmac_clear_descriptors(priv); stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv)) if (netif_msg_hw(priv))
stmmac_display_rings(priv); stmmac_display_rings(priv, dma_conf);
return ret; return ret;
} }
...@@ -1791,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1791,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/** /**
* dma_free_tx_skbufs - free TX dma buffers * dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: TX queue index * @queue: TX queue index
*/ */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) static void dma_free_tx_skbufs(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i; int i;
tx_q->xsk_frames_done = 0; tx_q->xsk_frames_done = 0;
for (i = 0; i < priv->dma_conf.dma_tx_size; i++) for (i = 0; i < dma_conf->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i); stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) { if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
...@@ -1820,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) ...@@ -1820,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
u32 queue; u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++) for (queue = 0; queue < tx_queue_cnt; queue++)
dma_free_tx_skbufs(priv, queue); dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
} }
/** /**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
*/ */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */ /* Release the DMA RX socket buffers */
if (rx_q->xsk_pool) if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue); dma_free_rx_xskbufs(priv, dma_conf, queue);
else else
dma_free_rx_skbufs(priv, queue); dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0; rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL; rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */ /* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc) if (!priv->extend_desc)
dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc), sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy); rx_q->dma_rx, rx_q->dma_rx_phy);
else else
dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc), sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy); rx_q->dma_erx, rx_q->dma_rx_phy);
...@@ -1859,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1859,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
page_pool_destroy(rx_q->page_pool); page_pool_destroy(rx_q->page_pool);
} }
static void free_dma_rx_desc_resources(struct stmmac_priv *priv) static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 rx_count = priv->plat->rx_queues_to_use; u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue; u32 queue;
/* Free RX queue resources */ /* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++) for (queue = 0; queue < rx_count; queue++)
__free_dma_rx_desc_resources(priv, queue); __free_dma_rx_desc_resources(priv, dma_conf, queue);
} }
/** /**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue) * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: TX queue index * @queue: TX queue index
*/ */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size; size_t size;
void *addr; void *addr;
/* Release the DMA TX socket buffers */ /* Release the DMA TX socket buffers */
dma_free_tx_skbufs(priv, queue); dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) { if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc); size = sizeof(struct dma_extended_desc);
...@@ -1894,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1894,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
addr = tx_q->dma_tx; addr = tx_q->dma_tx;
} }
size *= priv->dma_conf.dma_tx_size; size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
...@@ -1902,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1902,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
kfree(tx_q->tx_skbuff); kfree(tx_q->tx_skbuff);
} }
static void free_dma_tx_desc_resources(struct stmmac_priv *priv) static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 tx_count = priv->plat->tx_queues_to_use; u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue; u32 queue;
/* Free TX queue resources */ /* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) for (queue = 0; queue < tx_count; queue++)
__free_dma_tx_desc_resources(priv, queue); __free_dma_tx_desc_resources(priv, dma_conf, queue);
} }
/** /**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: RX queue index * @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic) * Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of * this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to * reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism. * allow zero-copy mechanism.
*/ */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue]; struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv); bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 }; struct page_pool_params pp_params = { 0 };
...@@ -1935,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1935,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
rx_q->priv_data = priv; rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = priv->dma_conf.dma_rx_size; pp_params.pool_size = dma_conf->dma_rx_size;
num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE); num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages); pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device); pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device; pp_params.dev = priv->device;
...@@ -1951,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1951,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return ret; return ret;
} }
rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size, rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool), sizeof(*rx_q->buf_pool),
GFP_KERNEL); GFP_KERNEL);
if (!rx_q->buf_pool) if (!rx_q->buf_pool)
...@@ -1959,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1959,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) { if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device, rx_q->dma_erx = dma_alloc_coherent(priv->device,
priv->dma_conf.dma_rx_size * dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc), sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy, &rx_q->dma_rx_phy,
GFP_KERNEL); GFP_KERNEL);
...@@ -1968,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1968,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
} else { } else {
rx_q->dma_rx = dma_alloc_coherent(priv->device, rx_q->dma_rx = dma_alloc_coherent(priv->device,
priv->dma_conf.dma_rx_size * dma_conf->dma_rx_size *
sizeof(struct dma_desc), sizeof(struct dma_desc),
&rx_q->dma_rx_phy, &rx_q->dma_rx_phy,
GFP_KERNEL); GFP_KERNEL);
...@@ -1993,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -1993,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0; return 0;
} }
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 rx_count = priv->plat->rx_queues_to_use; u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue; u32 queue;
...@@ -2001,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -2001,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */ /* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) { for (queue = 0; queue < rx_count; queue++) {
ret = __alloc_dma_rx_desc_resources(priv, queue); ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret) if (ret)
goto err_dma; goto err_dma;
} }
...@@ -2009,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -2009,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
return 0; return 0;
err_dma: err_dma:
free_dma_rx_desc_resources(priv); free_dma_rx_desc_resources(priv, dma_conf);
return ret; return ret;
} }
...@@ -2017,28 +2072,31 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -2017,28 +2072,31 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/** /**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* @queue: TX queue index * @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic) * Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of * this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to * reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism. * allow zero-copy mechanism.
*/ */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf,
u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size; size_t size;
void *addr; void *addr;
tx_q->queue_index = queue; tx_q->queue_index = queue;
tx_q->priv_data = priv; tx_q->priv_data = priv;
tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size, tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma), sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->tx_skbuff_dma) if (!tx_q->tx_skbuff_dma)
return -ENOMEM; return -ENOMEM;
tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size, tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *), sizeof(struct sk_buff *),
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->tx_skbuff) if (!tx_q->tx_skbuff)
...@@ -2051,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -2051,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
else else
size = sizeof(struct dma_desc); size = sizeof(struct dma_desc);
size *= priv->dma_conf.dma_tx_size; size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size, addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL); &tx_q->dma_tx_phy, GFP_KERNEL);
...@@ -2068,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) ...@@ -2068,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0; return 0;
} }
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
u32 tx_count = priv->plat->tx_queues_to_use; u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue; u32 queue;
...@@ -2076,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -2076,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */ /* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) { for (queue = 0; queue < tx_count; queue++) {
ret = __alloc_dma_tx_desc_resources(priv, queue); ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret) if (ret)
goto err_dma; goto err_dma;
} }
...@@ -2084,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -2084,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
return 0; return 0;
err_dma: err_dma:
free_dma_tx_desc_resources(priv); free_dma_tx_desc_resources(priv, dma_conf);
return ret; return ret;
} }
/** /**
* alloc_dma_desc_resources - alloc TX/RX resources. * alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
* Description: according to which descriptor can be used (extend or basic) * Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of * this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to * reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism. * allow zero-copy mechanism.
*/ */
static int alloc_dma_desc_resources(struct stmmac_priv *priv) static int alloc_dma_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
/* RX Allocation */ /* RX Allocation */
int ret = alloc_dma_rx_desc_resources(priv); int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret) if (ret)
return ret; return ret;
ret = alloc_dma_tx_desc_resources(priv); ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret; return ret;
} }
...@@ -2112,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) ...@@ -2112,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
/** /**
* free_dma_desc_resources - free dma desc resources * free_dma_desc_resources - free dma desc resources
* @priv: private structure * @priv: private structure
* @dma_conf: structure to take the dma data
*/ */
static void free_dma_desc_resources(struct stmmac_priv *priv) static void free_dma_desc_resources(struct stmmac_priv *priv,
struct stmmac_dma_conf *dma_conf)
{ {
/* Release the DMA TX socket buffers */ /* Release the DMA TX socket buffers */
free_dma_tx_desc_resources(priv); free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later /* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned. * to ensure all pending XDP_TX buffers are returned.
*/ */
free_dma_rx_desc_resources(priv); free_dma_rx_desc_resources(priv, dma_conf);
} }
/** /**
...@@ -2627,8 +2690,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) ...@@ -2627,8 +2690,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan); stmmac_stop_tx_dma(priv, chan);
dma_free_tx_skbufs(priv, chan); dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
stmmac_clear_tx_descriptors(priv, chan); stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
stmmac_reset_tx_queue(priv, chan); stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan); tx_q->dma_tx_phy, chan);
...@@ -3619,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev) ...@@ -3619,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev)
} }
/** /**
* stmmac_open - open entry point of the driver * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
* @priv: driver private structure
* @mtu: MTU to setup the dma queue and buf with
* Description: Allocate and generate a dma_conf based on the provided MTU.
* Allocate the Tx/Rx DMA queue and init them.
* Return value:
* the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
*/
static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
{
struct stmmac_dma_conf *dma_conf;
int chan, bfsize, ret;
dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
if (!dma_conf) {
netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
__func__);
return ERR_PTR(-ENOMEM);
}
bfsize = stmmac_set_16kib_bfsize(priv, mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(mtu, 0);
dma_conf->dma_buf_sz = bfsize;
/* Chose the tx/rx size from the already defined one in the
* priv struct. (if defined)
*/
dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
if (!dma_conf->dma_tx_size)
dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
if (!dma_conf->dma_rx_size)
dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
}
ret = alloc_dma_desc_resources(priv, dma_conf);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto alloc_error;
}
ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
__func__);
goto init_error;
}
return dma_conf;
init_error:
free_dma_desc_resources(priv, dma_conf);
alloc_error:
kfree(dma_conf);
return ERR_PTR(ret);
}
/**
* __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure. * @dev : pointer to the device structure.
* @dma_conf : structure to take the dma data
* Description: * Description:
* This function is the open entry point of the driver. * This function is the open entry point of the driver.
* Return value: * Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h * 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure. * file on failure.
*/ */
static int stmmac_open(struct net_device *dev) static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface; int mode = priv->plat->phy_interface;
int bfsize = 0;
u32 chan; u32 chan;
int ret; int ret;
...@@ -3656,45 +3793,10 @@ static int stmmac_open(struct net_device *dev) ...@@ -3656,45 +3793,10 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc; priv->xstats.threshold = tc;
bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
priv->dma_conf.dma_buf_sz = bfsize;
buf_sz = bfsize;
priv->rx_copybreak = STMMAC_RX_COPYBREAK; priv->rx_copybreak = STMMAC_RX_COPYBREAK;
if (!priv->dma_conf.dma_tx_size) buf_sz = dma_conf->dma_buf_sz;
priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
if (!priv->dma_conf.dma_rx_size)
priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
}
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
ret = init_dma_desc_rings(dev, GFP_KERNEL);
if (ret < 0) {
netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
__func__);
goto init_error;
}
stmmac_reset_queues_param(priv); stmmac_reset_queues_param(priv);
...@@ -3728,14 +3830,28 @@ static int stmmac_open(struct net_device *dev) ...@@ -3728,14 +3830,28 @@ static int stmmac_open(struct net_device *dev)
stmmac_hw_teardown(dev); stmmac_hw_teardown(dev);
init_error: init_error:
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
phylink_disconnect_phy(priv->phylink); phylink_disconnect_phy(priv->phylink);
init_phy_error: init_phy_error:
pm_runtime_put(priv->device); pm_runtime_put(priv->device);
return ret; return ret;
} }
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct stmmac_dma_conf *dma_conf;
int ret;
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
ret = __stmmac_open(dev, dma_conf);
kfree(dma_conf);
return ret;
}
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{ {
set_bit(__FPE_REMOVING, &priv->fpe_task_state); set_bit(__FPE_REMOVING, &priv->fpe_task_state);
...@@ -3782,7 +3898,7 @@ static int stmmac_release(struct net_device *dev) ...@@ -3782,7 +3898,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv); stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */ /* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */ /* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false); stmmac_mac_set(priv, priv->ioaddr, false);
...@@ -6304,7 +6420,7 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6304,7 +6420,7 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue); stmmac_stop_rx_dma(priv, queue);
__free_dma_rx_desc_resources(priv, queue); __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
} }
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
...@@ -6315,21 +6431,21 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6315,21 +6431,21 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
u32 buf_size; u32 buf_size;
int ret; int ret;
ret = __alloc_dma_rx_desc_resources(priv, queue); ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n"); netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return; return;
} }
ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) { if (ret) {
__free_dma_rx_desc_resources(priv, queue); __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n"); netdev_err(priv->dev, "Failed to init RX desc.\n");
return; return;
} }
stmmac_reset_rx_queue(priv, queue); stmmac_reset_rx_queue(priv, queue);
stmmac_clear_rx_descriptors(priv, queue); stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index); rx_q->dma_rx_phy, rx_q->queue_index);
...@@ -6367,7 +6483,7 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6367,7 +6483,7 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue); stmmac_stop_tx_dma(priv, queue);
__free_dma_tx_desc_resources(priv, queue); __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
} }
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
...@@ -6377,21 +6493,21 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6377,21 +6493,21 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
unsigned long flags; unsigned long flags;
int ret; int ret;
ret = __alloc_dma_tx_desc_resources(priv, queue); ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n"); netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return; return;
} }
ret = __init_dma_tx_desc_rings(priv, queue); ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) { if (ret) {
__free_dma_tx_desc_resources(priv, queue); __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n"); netdev_err(priv->dev, "Failed to init TX desc.\n");
return; return;
} }
stmmac_reset_tx_queue(priv, queue); stmmac_reset_tx_queue(priv, queue);
stmmac_clear_tx_descriptors(priv, queue); stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index); tx_q->dma_tx_phy, tx_q->queue_index);
...@@ -6428,7 +6544,7 @@ void stmmac_xdp_release(struct net_device *dev) ...@@ -6428,7 +6544,7 @@ void stmmac_xdp_release(struct net_device *dev)
stmmac_stop_all_dma(priv); stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */ /* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */ /* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false); stmmac_mac_set(priv, priv->ioaddr, false);
...@@ -6453,14 +6569,14 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6453,14 +6569,14 @@ int stmmac_xdp_open(struct net_device *dev)
u32 chan; u32 chan;
int ret; int ret;
ret = alloc_dma_desc_resources(priv); ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) { if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n", netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__); __func__);
goto dma_desc_error; goto dma_desc_error;
} }
ret = init_dma_desc_rings(dev, GFP_KERNEL); ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n", netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__); __func__);
...@@ -6542,7 +6658,7 @@ int stmmac_xdp_open(struct net_device *dev) ...@@ -6542,7 +6658,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_hw_teardown(dev); stmmac_hw_teardown(dev);
init_error: init_error:
free_dma_desc_resources(priv); free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error: dma_desc_error:
return ret; return ret;
} }
...@@ -7411,7 +7527,7 @@ int stmmac_resume(struct device *dev) ...@@ -7411,7 +7527,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv); stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv); stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv); stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false); stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv); stmmac_init_coalesce(priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment