Commit e6c29506 authored by Jakub Kicinski

Merge branch 'eliminate-config_nr_cpus-dependency-in-dpaa-eth-and-enable-compile_test-in-fsl_qbman'

Vladimir Oltean says:

====================
Eliminate CONFIG_NR_CPUS dependency in dpaa-eth and enable COMPILE_TEST in fsl_qbman

Breno's previous attempt at enabling COMPILE_TEST for the fsl_qbman
driver (now included here as patch 5/5) triggered compilation warnings
for large CONFIG_NR_CPUS values:
https://lore.kernel.org/all/202406261920.l5pzM1rj-lkp@intel.com/

Patch 1/5 switches two NR_CPUS arrays in the dpaa-eth driver to dynamic
allocation to avoid that warning. There is more NR_CPUS usage in the
fsl-qbman driver, but that looks relatively harmless and I couldn't find
a good reason to change it.
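
To illustrate the pattern, here is a condensed sketch of the
dpaa_fq_setup() conversion from the diff below (error paths trimmed);
the dpaa_set_coalesce() conversion has the same shape:

	/* before: on-stack array whose footprint scales with CONFIG_NR_CPUS */
	u16 channels[NR_CPUS];

	/* after: sized at runtime by the possible CPU count */
	u16 *channels;

	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;
	/* ... use channels ... */
	kfree(channels);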

I noticed, while testing, that the driver doesn't actually work properly
with high CONFIG_NR_CPUS values, and patch 2/5 addresses that.

During code analysis, I identified two places that handle conditions
which can never occur. Patches 3/5 and 4/5 simplify the probing code -
dpaa_fq_setup() - just a little bit.

Finally, patch 5/5 is the one that triggered all of this. Herbert has
given his okay to take it via netdev, despite it touching soc/qbman:
https://lore.kernel.org/all/Zns%2FeVVBc7pdv0yM@gondor.apana.org.au/

Link to v1:
https://lore.kernel.org/netdev/20240710230025.46487-1-vladimir.oltean@nxp.com/
====================

Link: https://patch.msgid.link/20240713225336.1746343-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 62fdd170 782fe08e
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 			 void *type_data)
 {
 	struct dpaa_priv *priv = netdev_priv(net_dev);
+	int num_txqs_per_tc = dpaa_num_txqs_per_tc();
 	struct tc_mqprio_qopt *mqprio = type_data;
 	u8 num_tc;
 	int i;
@@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 
 	netdev_set_num_tc(net_dev, num_tc);
 
 	for (i = 0; i < num_tc; i++)
-		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
-				    i * DPAA_TC_TXQ_NUM);
+		netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+				    i * num_txqs_per_tc);
 
 out:
 	priv->num_tc = num_tc ? : 1;
-	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+	netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
 	return 0;
 }
@@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 		fq->wq = 6;
 		break;
 	case FQ_TYPE_TX:
-		switch (idx / DPAA_TC_TXQ_NUM) {
+		switch (idx / dpaa_num_txqs_per_tc()) {
 		case 0:
 			/* Low priority (best effort) */
 			fq->wq = 6;
@@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 			fq->wq = 0;
 			break;
 		default:
-			WARN(1, "Too many TX FQs: more than %d!\n",
-			     DPAA_ETH_TXQ_NUM);
+			WARN(1, "Too many TX FQs: more than %zu!\n",
+			     dpaa_max_num_txqs());
 		}
 		break;
 	default:
@@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 		port_fqs->rx_pcdq = &dpaa_fq[0];
 
-	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+	if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+			   FQ_TYPE_TX_CONF_MQ))
 		goto fq_alloc_failed;
 
 	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 
 	port_fqs->tx_defq = &dpaa_fq[0];
 
-	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+	if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
 		goto fq_alloc_failed;
 
 	return 0;
@@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
 	}
 }
 
-static void dpaa_fq_setup(struct dpaa_priv *priv,
-			  const struct dpaa_fq_cbs *fq_cbs,
-			  struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+			 const struct dpaa_fq_cbs *fq_cbs,
+			 struct fman_port *tx_port)
 {
 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
 	const cpumask_t *affine_cpus = qman_affine_cpus();
-	u16 channels[NR_CPUS];
 	struct dpaa_fq *fq;
+	u16 *channels;
+
+	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
 
 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 		channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 		case FQ_TYPE_TX:
 			dpaa_setup_egress(priv, fq, tx_port,
 					  &fq_cbs->egress_ern);
-			/* If we have more Tx queues than the number of cores,
-			 * just ignore the extra ones.
-			 */
-			if (egress_cnt < DPAA_ETH_TXQ_NUM)
-				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
 			break;
 		case FQ_TYPE_TX_CONF_MQ:
 			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 		}
 	}
 
-	/* Make sure all CPUs receive a corresponding Tx queue. */
-	while (egress_cnt < DPAA_ETH_TXQ_NUM) {
-		list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
-			if (fq->fq_type != FQ_TYPE_TX)
-				continue;
-			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-			if (egress_cnt == DPAA_ETH_TXQ_NUM)
-				break;
-		}
-	}
+	kfree(channels);
+
+	return 0;
 }
 
 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
 {
 	int i;
 
-	for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+	for (i = 0; i < dpaa_max_num_txqs(); i++)
 		if (priv->egress_fqs[i] == tx_fq)
 			return i;
@@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	/* Allocate this early, so we can store relevant information in
 	 * the private area
 	 */
-	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+	net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
 	if (!net_dev) {
 		dev_err(dev, "alloc_etherdev_mq() failed\n");
 		return -ENOMEM;
@@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
 	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
 
+	priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+					sizeof(*priv->egress_fqs),
+					GFP_KERNEL);
+	if (!priv->egress_fqs) {
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+
+	priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+				      sizeof(*priv->conf_fqs),
+				      GFP_KERNEL);
+	if (!priv->conf_fqs) {
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+
 	mac_dev = dpaa_mac_dev_get(pdev);
 	if (IS_ERR(mac_dev)) {
 		netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	 */
 	dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
-	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	if (err)
+		goto free_dpaa_bps;
 
 	/* Create a congestion group for this netdev, with
 	 * dynamically-allocated CGR ID.
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	}
 
 	priv->num_tc = 1;
-	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+	netif_set_real_num_tx_queues(net_dev,
+				     priv->num_tc * dpaa_num_txqs_per_tc());
 
 	/* Initialize NAPI */
 	err = dpaa_napi_add(net_dev);
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -18,10 +18,6 @@
 
 /* Number of prioritised traffic classes */
 #define DPAA_TC_NUM		4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM		NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
 /* More detailed FQ types - used for fine-grained WQ assignments */
 enum dpaa_fq_type {
@@ -142,8 +138,8 @@ struct dpaa_priv {
 	struct mac_device *mac_dev;
 	struct device *rx_dma_dev;
 	struct device *tx_dma_dev;
-	struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
-	struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+	struct qman_fq **egress_fqs;
+	struct qman_fq **conf_fqs;
 
 	u16 channel;
 	struct list_head dpaa_fq_list;
@@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
 /* from dpaa_eth_sysfs.c */
 void dpaa_eth_sysfs_remove(struct device *dev);
 void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+	return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+	return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
 #endif	/* __DPAA_H */
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
 			     struct netlink_ext_ack *extack)
 {
 	const cpumask_t *cpus = qman_affine_cpus();
-	bool needs_revert[NR_CPUS] = {false};
 	struct qman_portal *portal;
 	u32 period, prev_period;
 	u8 thresh, prev_thresh;
+	bool *needs_revert;
 	int cpu, res;
 
+	needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+	if (!needs_revert)
+		return -ENOMEM;
+
 	period = c->rx_coalesce_usecs;
 	thresh = c->rx_max_coalesced_frames;
@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
 		needs_revert[cpu] = true;
 	}
 
+	kfree(needs_revert);
+
 	return 0;
 
 revert_values:
@@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
 		qman_dqrr_set_ithresh(portal, prev_thresh);
 	}
 
+	kfree(needs_revert);
+
 	return res;
 }
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig FSL_DPAA
 	bool "QorIQ DPAA1 framework support"
-	depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+	depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT)
 	select GENERIC_ALLOCATOR
 	help
 	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of