Commit 57bb1132 authored by David S. Miller

Merge branch 'dsa-rtnl'

Vladimir Oltean says:

====================
Drop rtnl_lock from DSA .port_fdb_{add,del}

As mentioned in the RFC posted 2 months ago:
https://patchwork.kernel.org/project/netdevbpf/cover/20210824114049.3814660-1-vladimir.oltean@nxp.com/

DSA is transitioning to a driver API where the rtnl_lock is not held
when calling ds->ops->port_fdb_add() and ds->ops->port_fdb_del().
Drivers cannot take that lock privately from those callbacks either.

This change is required so that DSA can wait for switchdev FDB work
items to finish before leaving the bridge. That change will be made in a
future patch series.

A small selftest is provided with the patch set in the hope that
concurrency issues uncovered by this series, but not spotted by me by
code inspection, will be caught.

A status of the existing drivers:

- mv88e6xxx_port_fdb_add() and mv88e6xxx_port_fdb_del() take
  mv88e6xxx_reg_lock() so they should be safe.

- qca8k_fdb_add() and qca8k_fdb_del() take mutex_lock(&priv->reg_mutex)
  so they should be safe.

- hellcreek_fdb_add() and hellcreek_fdb_del() take mutex_lock(&hellcreek->reg_lock)
  so they should be safe.

- ksz9477_port_fdb_add() and ksz9477_port_fdb_del() take mutex_lock(&dev->alu_mutex)
  so they should be safe.

- b53_fdb_add() and b53_fdb_del() did not have locking, so I've added a
  scheme based on my own judgement there (not tested).

- felix_fdb_add() and felix_fdb_del() did not have locking, I've added
  and tested a locking scheme there.

- mt7530_port_fdb_add() and mt7530_port_fdb_del() take
  mutex_lock(&priv->reg_mutex), so they should be safe.

- gswip_port_fdb() did not have locking, so I've added a non-expert
  locking scheme based on my own judgement (not tested).

- lan9303_alr_add_port() and lan9303_alr_del_port() take
  mutex_lock(&chip->alr_mutex) so they should be safe.

- sja1105_fdb_add() and sja1105_fdb_del() did not have locking, I've
  added and tested a locking scheme.

Changes in v3:
- Unlock arl_mutex only once in b53_fdb_dump().

Changes in v4:
- Use __must_hold in ocelot and b53
- Add missing mutex_init in lantiq_gswip
- Clean up the selftest a bit.

Changes in v5:
- Replace __must_hold with a comment.
- Add a new patch (01/10).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2d7e73f0 eccd0a80
...@@ -13056,6 +13056,7 @@ F: include/linux/dsa/ ...@@ -13056,6 +13056,7 @@ F: include/linux/dsa/
F: include/linux/platform_data/dsa.h F: include/linux/platform_data/dsa.h
F: include/net/dsa.h F: include/net/dsa.h
F: net/dsa/ F: net/dsa/
F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL] NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net> M: "David S. Miller" <davem@davemloft.net>
......
...@@ -1542,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port, ...@@ -1542,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
} }
EXPORT_SYMBOL(b53_vlan_del); EXPORT_SYMBOL(b53_vlan_del);
/* Address Resolution Logic routines */ /* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
static int b53_arl_op_wait(struct b53_device *dev) static int b53_arl_op_wait(struct b53_device *dev)
{ {
unsigned int timeout = 10; unsigned int timeout = 10;
...@@ -1707,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port, ...@@ -1707,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid) const unsigned char *addr, u16 vid)
{ {
struct b53_device *priv = ds->priv; struct b53_device *priv = ds->priv;
int ret;
/* 5325 and 5365 require some more massaging, but could /* 5325 and 5365 require some more massaging, but could
* be supported eventually * be supported eventually
...@@ -1714,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port, ...@@ -1714,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv)) if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP; return -EOPNOTSUPP;
return b53_arl_op(priv, 0, port, addr, vid, true); mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
return ret;
} }
EXPORT_SYMBOL(b53_fdb_add); EXPORT_SYMBOL(b53_fdb_add);
...@@ -1722,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port, ...@@ -1722,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid) const unsigned char *addr, u16 vid)
{ {
struct b53_device *priv = ds->priv; struct b53_device *priv = ds->priv;
int ret;
return b53_arl_op(priv, 0, port, addr, vid, false); mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, false);
mutex_unlock(&priv->arl_mutex);
return ret;
} }
EXPORT_SYMBOL(b53_fdb_del); EXPORT_SYMBOL(b53_fdb_del);
...@@ -1780,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, ...@@ -1780,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
int ret; int ret;
u8 reg; u8 reg;
mutex_lock(&priv->arl_mutex);
/* Start search operation */ /* Start search operation */
reg = ARL_SRCH_STDN; reg = ARL_SRCH_STDN;
b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
...@@ -1787,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, ...@@ -1787,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
do { do {
ret = b53_arl_search_wait(priv); ret = b53_arl_search_wait(priv);
if (ret) if (ret)
return ret; break;
b53_arl_search_rd(priv, 0, &results[0]); b53_arl_search_rd(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data); ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret) if (ret)
return ret; break;
if (priv->num_arl_bins > 2) { if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]); b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data); ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret) if (ret)
return ret; break;
if (!results[0].is_valid && !results[1].is_valid) if (!results[0].is_valid && !results[1].is_valid)
break; break;
...@@ -1806,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, ...@@ -1806,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
} while (count++ < b53_max_arl_entries(priv) / 2); } while (count++ < b53_max_arl_entries(priv) / 2);
mutex_unlock(&priv->arl_mutex);
return 0; return 0;
} }
EXPORT_SYMBOL(b53_fdb_dump); EXPORT_SYMBOL(b53_fdb_dump);
...@@ -1814,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port, ...@@ -1814,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb) const struct switchdev_obj_port_mdb *mdb)
{ {
struct b53_device *priv = ds->priv; struct b53_device *priv = ds->priv;
int ret;
/* 5325 and 5365 require some more massaging, but could /* 5325 and 5365 require some more massaging, but could
* be supported eventually * be supported eventually
...@@ -1821,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port, ...@@ -1821,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv)) if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP; return -EOPNOTSUPP;
return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
mutex_unlock(&priv->arl_mutex);
return ret;
} }
EXPORT_SYMBOL(b53_mdb_add); EXPORT_SYMBOL(b53_mdb_add);
...@@ -1831,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port, ...@@ -1831,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv; struct b53_device *priv = ds->priv;
int ret; int ret;
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
mutex_unlock(&priv->arl_mutex);
if (ret) if (ret)
dev_err(ds->dev, "failed to delete MDB entry\n"); dev_err(ds->dev, "failed to delete MDB entry\n");
...@@ -2668,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base, ...@@ -2668,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
mutex_init(&dev->reg_mutex); mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex); mutex_init(&dev->stats_mutex);
mutex_init(&dev->arl_mutex);
return dev; return dev;
} }
......
...@@ -107,6 +107,7 @@ struct b53_device { ...@@ -107,6 +107,7 @@ struct b53_device {
struct mutex reg_mutex; struct mutex reg_mutex;
struct mutex stats_mutex; struct mutex stats_mutex;
struct mutex arl_mutex;
const struct b53_io_ops *ops; const struct b53_io_ops *ops;
/* chip specific data */ /* chip specific data */
......
...@@ -276,6 +276,7 @@ struct gswip_priv { ...@@ -276,6 +276,7 @@ struct gswip_priv {
int num_gphy_fw; int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw; struct gswip_gphy_fw *gphy_fw;
u32 port_vlan_filter; u32 port_vlan_filter;
struct mutex pce_table_lock;
}; };
struct gswip_pce_table_entry { struct gswip_pce_table_entry {
...@@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, ...@@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
mutex_lock(&priv->pce_table_lock);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS); GSWIP_PCE_TBL_CTRL_BAS);
if (err) if (err) {
mutex_unlock(&priv->pce_table_lock);
return err; return err;
}
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
...@@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, ...@@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS); GSWIP_PCE_TBL_CTRL_BAS);
if (err) if (err) {
mutex_unlock(&priv->pce_table_lock);
return err; return err;
}
for (i = 0; i < ARRAY_SIZE(tbl->key); i++) for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
...@@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv, ...@@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
mutex_unlock(&priv->pce_table_lock);
return 0; return 0;
} }
...@@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv, ...@@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
mutex_lock(&priv->pce_table_lock);
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS); GSWIP_PCE_TBL_CTRL_BAS);
if (err) if (err) {
mutex_unlock(&priv->pce_table_lock);
return err; return err;
}
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
...@@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv, ...@@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
crtl |= GSWIP_PCE_TBL_CTRL_BAS; crtl |= GSWIP_PCE_TBL_CTRL_BAS;
gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS); GSWIP_PCE_TBL_CTRL_BAS);
mutex_unlock(&priv->pce_table_lock);
return err;
} }
/* Add the LAN port into a bridge with the CPU port by /* Add the LAN port into a bridge with the CPU port by
...@@ -2104,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev) ...@@ -2104,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->priv = priv; priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops; priv->ds->ops = priv->hw_info->ops;
priv->dev = dev; priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION); version = gswip_switch_r(priv, GSWIP_VERSION);
np = dev->of_node; np = dev->of_node;
......
...@@ -261,6 +261,8 @@ struct sja1105_private { ...@@ -261,6 +261,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another. * the switch doesn't confuse them with one another.
*/ */
struct mutex mgmt_lock; struct mutex mgmt_lock;
/* Serializes access to the dynamic config interface */
struct mutex dynamic_config_lock;
struct devlink_region **regions; struct devlink_region **regions;
struct sja1105_cbs_entry *cbs; struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1; struct mii_bus *mdio_base_t1;
......
...@@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = { ...@@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
}, },
}; };
#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
int rc;
/* We don't _need_ to read the full entry, just the command area which
* is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
* buffer that contains the full entry too. Additionally, our API
* doesn't really know how many bytes into the buffer does the command
* area really begin. So just read back the whole entry.
*/
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
return rc;
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
memset(cmd, 0, sizeof(*cmd));
ops->cmd_packing(packed_buf, cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
return cmd->valid ? -EAGAIN : 0;
}
/* Poll the dynamic config entry's control area until the hardware has
* cleared the VALID bit, which means we have confirmation that it has
* finished processing the command.
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
{
int rc;
return read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, cmd, ops);
}
/* Provides read access to the settings through the dynamic interface /* Provides read access to the settings through the dynamic interface
* of the switch. * of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops. * @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
...@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, ...@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
struct sja1105_dyn_cmd cmd = {0}; struct sja1105_dyn_cmd cmd = {0};
/* SPI payload buffer */ /* SPI payload buffer */
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0}; u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
int retries = 3;
int rc; int rc;
if (blk_idx >= BLK_IDX_MAX_DYN) if (blk_idx >= BLK_IDX_MAX_DYN)
...@@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, ...@@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK); ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */ /* Send SPI write operation: read config table entry */
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size); ops->packed_size);
if (rc < 0) if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc; return rc;
}
/* Loop until we have confirmation that hardware has finished rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
* processing the command and has cleared the VALID field mutex_unlock(&priv->dynamic_config_lock);
*/ if (rc < 0)
do { return rc;
memset(packed_buf, 0, ops->packed_size);
/* Retrieve the read operation's result */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0)
return rc;
cmd = (struct sja1105_dyn_cmd) {0};
ops->cmd_packing(packed_buf, &cmd, UNPACK);
if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
cpu_relax();
} while (cmd.valid && --retries);
if (cmd.valid) if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ETIMEDOUT; return -ENOENT;
/* Don't dereference possibly NULL pointer - maybe caller /* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not. * only wanted to see whether the entry existed or not.
...@@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv, ...@@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK); ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */ /* Send SPI write operation: read config table entry */
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size); ops->packed_size);
if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0) if (rc < 0)
return rc; return rc;
......
...@@ -3365,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi) ...@@ -3365,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi)
priv->ds = ds; priv->ds = ds;
mutex_init(&priv->ptp_data.lock); mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock); mutex_init(&priv->mgmt_lock);
rc = sja1105_parse_dt(priv); rc = sja1105_parse_dt(priv);
......
...@@ -20,11 +20,13 @@ struct ocelot_mact_entry { ...@@ -20,11 +20,13 @@ struct ocelot_mact_entry {
enum macaccess_entry_type type; enum macaccess_entry_type type;
}; };
/* Caller must hold &ocelot->mact_lock */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot) static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{ {
return ocelot_read(ocelot, ANA_TABLES_MACACCESS); return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
} }
/* Caller must hold &ocelot->mact_lock */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{ {
u32 val; u32 val;
...@@ -36,6 +38,7 @@ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) ...@@ -36,6 +38,7 @@ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
} }
/* Caller must hold &ocelot->mact_lock */
static void ocelot_mact_select(struct ocelot *ocelot, static void ocelot_mact_select(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], const unsigned char mac[ETH_ALEN],
unsigned int vid) unsigned int vid)
...@@ -67,6 +70,7 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, ...@@ -67,6 +70,7 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
ANA_TABLES_MACACCESS_ENTRYTYPE(type) | ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN); ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
unsigned int mc_ports; unsigned int mc_ports;
int err;
/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
if (type == ENTRYTYPE_MACv4) if (type == ENTRYTYPE_MACv4)
...@@ -79,18 +83,28 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, ...@@ -79,18 +83,28 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports)) if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
mutex_lock(&ocelot->mact_lock);
ocelot_mact_select(ocelot, mac, vid); ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */ /* Issue a write command */
ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS); ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot); err = ocelot_mact_wait_for_completion(ocelot);
mutex_unlock(&ocelot->mact_lock);
return err;
} }
EXPORT_SYMBOL(ocelot_mact_learn); EXPORT_SYMBOL(ocelot_mact_learn);
int ocelot_mact_forget(struct ocelot *ocelot, int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid) const unsigned char mac[ETH_ALEN], unsigned int vid)
{ {
int err;
mutex_lock(&ocelot->mact_lock);
ocelot_mact_select(ocelot, mac, vid); ocelot_mact_select(ocelot, mac, vid);
/* Issue a forget command */ /* Issue a forget command */
...@@ -98,7 +112,11 @@ int ocelot_mact_forget(struct ocelot *ocelot, ...@@ -98,7 +112,11 @@ int ocelot_mact_forget(struct ocelot *ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
ANA_TABLES_MACACCESS); ANA_TABLES_MACACCESS);
return ocelot_mact_wait_for_completion(ocelot); err = ocelot_mact_wait_for_completion(ocelot);
mutex_unlock(&ocelot->mact_lock);
return err;
} }
EXPORT_SYMBOL(ocelot_mact_forget); EXPORT_SYMBOL(ocelot_mact_forget);
...@@ -114,7 +132,9 @@ static void ocelot_mact_init(struct ocelot *ocelot) ...@@ -114,7 +132,9 @@ static void ocelot_mact_init(struct ocelot *ocelot)
| ANA_AGENCTRL_LEARN_IGNORE_VLAN, | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
ANA_AGENCTRL); ANA_AGENCTRL);
/* Clear the MAC table */ /* Clear the MAC table. We are not concurrent with anyone, so
* holding &ocelot->mact_lock is pointless.
*/
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
} }
...@@ -1170,6 +1190,7 @@ int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, ...@@ -1170,6 +1190,7 @@ int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
} }
EXPORT_SYMBOL(ocelot_port_fdb_do_dump); EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry) struct ocelot_mact_entry *entry)
{ {
...@@ -1220,33 +1241,40 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, ...@@ -1220,33 +1241,40 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
int ocelot_fdb_dump(struct ocelot *ocelot, int port, int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data) dsa_fdb_dump_cb_t *cb, void *data)
{ {
int err = 0;
int i, j; int i, j;
/* We could take the lock just around ocelot_mact_read, but doing so
* thousands of times in a row seems rather pointless and inefficient.
*/
mutex_lock(&ocelot->mact_lock);
/* Loop through all the mac tables entries. */ /* Loop through all the mac tables entries. */
for (i = 0; i < ocelot->num_mact_rows; i++) { for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) { for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry; struct ocelot_mact_entry entry;
bool is_static; bool is_static;
int ret;
ret = ocelot_mact_read(ocelot, port, i, j, &entry); err = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...), /* If the entry is invalid (wrong port, invalid...),
* skip it. * skip it.
*/ */
if (ret == -EINVAL) if (err == -EINVAL)
continue; continue;
else if (ret) else if (err)
return ret; break;
is_static = (entry.type == ENTRYTYPE_LOCKED); is_static = (entry.type == ENTRYTYPE_LOCKED);
ret = cb(entry.mac, entry.vid, is_static, data); err = cb(entry.mac, entry.vid, is_static, data);
if (ret) if (err)
return ret; break;
} }
} }
return 0; mutex_unlock(&ocelot->mact_lock);
return err;
} }
EXPORT_SYMBOL(ocelot_fdb_dump); EXPORT_SYMBOL(ocelot_fdb_dump);
...@@ -2231,6 +2259,7 @@ int ocelot_init(struct ocelot *ocelot) ...@@ -2231,6 +2259,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock); mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock); mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
spin_lock_init(&ocelot->ptp_clock_lock); spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock); spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats", snprintf(queue_name, sizeof(queue_name), "%s-stats",
......
...@@ -287,6 +287,7 @@ struct dsa_port { ...@@ -287,6 +287,7 @@ struct dsa_port {
/* List of MAC addresses that must be forwarded on this port. /* List of MAC addresses that must be forwarded on this port.
* These are only valid on CPU ports and DSA links. * These are only valid on CPU ports and DSA links.
*/ */
struct mutex addr_lists_lock;
struct list_head fdbs; struct list_head fdbs;
struct list_head mdbs; struct list_head mdbs;
......
...@@ -675,6 +675,9 @@ struct ocelot { ...@@ -675,6 +675,9 @@ struct ocelot {
struct delayed_work stats_work; struct delayed_work stats_work;
struct workqueue_struct *stats_queue; struct workqueue_struct *stats_queue;
/* Lock for serializing access to the MAC table */
struct mutex mact_lock;
struct workqueue_struct *owq; struct workqueue_struct *owq;
u8 ptp:1; u8 ptp:1;
......
...@@ -433,6 +433,7 @@ static int dsa_port_setup(struct dsa_port *dp) ...@@ -433,6 +433,7 @@ static int dsa_port_setup(struct dsa_port *dp)
if (dp->setup) if (dp->setup)
return 0; return 0;
mutex_init(&dp->addr_lists_lock);
INIT_LIST_HEAD(&dp->fdbs); INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs); INIT_LIST_HEAD(&dp->mdbs);
......
...@@ -2413,7 +2413,6 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work) ...@@ -2413,7 +2413,6 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
dp = dsa_to_port(ds, switchdev_work->port); dp = dsa_to_port(ds, switchdev_work->port);
rtnl_lock();
switch (switchdev_work->event) { switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (switchdev_work->host_addr) if (switchdev_work->host_addr)
...@@ -2448,7 +2447,6 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work) ...@@ -2448,7 +2447,6 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
break; break;
} }
rtnl_unlock();
dev_put(switchdev_work->dev); dev_put(switchdev_work->dev);
kfree(switchdev_work); kfree(switchdev_work);
......
...@@ -215,26 +215,30 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp, ...@@ -215,26 +215,30 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp,
struct dsa_switch *ds = dp->ds; struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a; struct dsa_mac_addr *a;
int port = dp->index; int port = dp->index;
int err; int err = 0;
/* No need to bother with refcounting for user ports */ /* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->port_mdb_add(ds, port, mdb); return ds->ops->port_mdb_add(ds, port, mdb);
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
if (a) { if (a) {
refcount_inc(&a->refcount); refcount_inc(&a->refcount);
return 0; goto out;
} }
a = kzalloc(sizeof(*a), GFP_KERNEL); a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a) if (!a) {
return -ENOMEM; err = -ENOMEM;
goto out;
}
err = ds->ops->port_mdb_add(ds, port, mdb); err = ds->ops->port_mdb_add(ds, port, mdb);
if (err) { if (err) {
kfree(a); kfree(a);
return err; goto out;
} }
ether_addr_copy(a->addr, mdb->addr); ether_addr_copy(a->addr, mdb->addr);
...@@ -242,7 +246,10 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp, ...@@ -242,7 +246,10 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp,
refcount_set(&a->refcount, 1); refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->mdbs); list_add_tail(&a->list, &dp->mdbs);
return 0; out:
mutex_unlock(&dp->addr_lists_lock);
return err;
} }
static int dsa_port_do_mdb_del(struct dsa_port *dp, static int dsa_port_do_mdb_del(struct dsa_port *dp,
...@@ -251,29 +258,36 @@ static int dsa_port_do_mdb_del(struct dsa_port *dp, ...@@ -251,29 +258,36 @@ static int dsa_port_do_mdb_del(struct dsa_port *dp,
struct dsa_switch *ds = dp->ds; struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a; struct dsa_mac_addr *a;
int port = dp->index; int port = dp->index;
int err; int err = 0;
/* No need to bother with refcounting for user ports */ /* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->port_mdb_del(ds, port, mdb); return ds->ops->port_mdb_del(ds, port, mdb);
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid); a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
if (!a) if (!a) {
return -ENOENT; err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&a->refcount)) if (!refcount_dec_and_test(&a->refcount))
return 0; goto out;
err = ds->ops->port_mdb_del(ds, port, mdb); err = ds->ops->port_mdb_del(ds, port, mdb);
if (err) { if (err) {
refcount_inc(&a->refcount); refcount_set(&a->refcount, 1);
return err; goto out;
} }
list_del(&a->list); list_del(&a->list);
kfree(a); kfree(a);
return 0; out:
mutex_unlock(&dp->addr_lists_lock);
return err;
} }
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr, static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
...@@ -282,26 +296,30 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr, ...@@ -282,26 +296,30 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
struct dsa_switch *ds = dp->ds; struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a; struct dsa_mac_addr *a;
int port = dp->index; int port = dp->index;
int err; int err = 0;
/* No need to bother with refcounting for user ports */ /* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->port_fdb_add(ds, port, addr, vid); return ds->ops->port_fdb_add(ds, port, addr, vid);
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid); a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
if (a) { if (a) {
refcount_inc(&a->refcount); refcount_inc(&a->refcount);
return 0; goto out;
} }
a = kzalloc(sizeof(*a), GFP_KERNEL); a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a) if (!a) {
return -ENOMEM; err = -ENOMEM;
goto out;
}
err = ds->ops->port_fdb_add(ds, port, addr, vid); err = ds->ops->port_fdb_add(ds, port, addr, vid);
if (err) { if (err) {
kfree(a); kfree(a);
return err; goto out;
} }
ether_addr_copy(a->addr, addr); ether_addr_copy(a->addr, addr);
...@@ -309,7 +327,10 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr, ...@@ -309,7 +327,10 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
refcount_set(&a->refcount, 1); refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &dp->fdbs); list_add_tail(&a->list, &dp->fdbs);
return 0; out:
mutex_unlock(&dp->addr_lists_lock);
return err;
} }
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr, static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
...@@ -318,29 +339,36 @@ static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr, ...@@ -318,29 +339,36 @@ static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
struct dsa_switch *ds = dp->ds; struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a; struct dsa_mac_addr *a;
int port = dp->index; int port = dp->index;
int err; int err = 0;
/* No need to bother with refcounting for user ports */ /* No need to bother with refcounting for user ports */
if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
return ds->ops->port_fdb_del(ds, port, addr, vid); return ds->ops->port_fdb_del(ds, port, addr, vid);
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid); a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
if (!a) if (!a) {
return -ENOENT; err = -ENOENT;
goto out;
}
if (!refcount_dec_and_test(&a->refcount)) if (!refcount_dec_and_test(&a->refcount))
return 0; goto out;
err = ds->ops->port_fdb_del(ds, port, addr, vid); err = ds->ops->port_fdb_del(ds, port, addr, vid);
if (err) { if (err) {
refcount_inc(&a->refcount); refcount_set(&a->refcount, 1);
return err; goto out;
} }
list_del(&a->list); list_del(&a->list);
kfree(a); kfree(a);
return 0; out:
mutex_unlock(&dp->addr_lists_lock);
return err;
} }
static int dsa_switch_host_fdb_add(struct dsa_switch *ds, static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
......
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Bridge FDB entries can be offloaded to DSA switches without holding the
# rtnl_mutex. Traditionally this mutex has conferred drivers implicit
# serialization, which means their code paths are not well tested in the
# presence of concurrency.
# This test creates a background task that stresses the FDB by adding and
# deleting an entry many times in a row without the rtnl_mutex held.
# It then tests the driver resistance to concurrency by calling .ndo_fdb_dump
# (with rtnl_mutex held) from a foreground task.
# Since either the FDB dump or the additions/removals can fail, but the
# additions and removals are performed in deferred as opposed to process
# context, we cannot simply check for user space error codes.
WAIT_TIME=1
NUM_NETIFS=1
REQUIRE_JQ="no"
REQUIRE_MZ="no"
NETIF_CREATE="no"
lib_dir=$(dirname $0)/../../../net/forwarding
source $lib_dir/lib.sh
cleanup() {
echo "Cleaning up"
kill $pid && wait $pid &> /dev/null
ip link del br0
echo "Please check kernel log for errors"
}
trap 'cleanup' EXIT
eth=${NETIFS[p1]}
ip link del br0 2&>1 >/dev/null || :
ip link add br0 type bridge && ip link set $eth master br0
(while :; do
bridge fdb add 00:01:02:03:04:05 dev $eth master static
bridge fdb del 00:01:02:03:04:05 dev $eth master static
done) &
pid=$!
for i in $(seq 1 50); do
bridge fdb show > /dev/null
sleep 3
echo "$((${i} * 2))% complete..."
done
...@@ -23,6 +23,8 @@ MC_CLI=${MC_CLI:=smcroutectl} ...@@ -23,6 +23,8 @@ MC_CLI=${MC_CLI:=smcroutectl}
PING_TIMEOUT=${PING_TIMEOUT:=5} PING_TIMEOUT=${PING_TIMEOUT:=5}
WAIT_TIMEOUT=${WAIT_TIMEOUT:=20} WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600} INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600}
REQUIRE_JQ=${REQUIRE_JQ:=yes}
REQUIRE_MZ=${REQUIRE_MZ:=yes}
relative_path="${BASH_SOURCE%/*}" relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
...@@ -141,8 +143,12 @@ require_command() ...@@ -141,8 +143,12 @@ require_command()
fi fi
} }
require_command jq if [[ "$REQUIRE_JQ" = "yes" ]]; then
require_command $MZ require_command jq
fi
if [[ "$REQUIRE_MZ" = "yes" ]]; then
require_command $MZ
fi
if [[ ! -v NUM_NETIFS ]]; then if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\"" echo "SKIP: importer does not define \"NUM_NETIFS\""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment