Commit 53110c67 authored by Jakub Kicinski

Merge branch 'fdb-entries-on-dsa-lag-interfaces'

Vladimir Oltean says:

====================
FDB entries on DSA LAG interfaces

This work permits having static and local FDB entries on LAG interfaces
that are offloaded by DSA ports. New API needs to be introduced in
drivers. To maintain consistency with the bridging offload code, I've
taken the liberty to reorganize the data structures added by Tobias in
the DSA core a little bit.

Tested on NXP LS1028A (felix switch). Would appreciate feedback/testing
on other platforms too. Testing procedure was the one described here:
https://patchwork.kernel.org/project/netdevbpf/cover/20210205130240.4072854-1-vladimir.oltean@nxp.com/

with this script:

ip link del bond0
ip link add bond0 type bond mode 802.3ad
ip link set swp1 down && ip link set swp1 master bond0 && ip link set swp1 up
ip link set swp2 down && ip link set swp2 master bond0 && ip link set swp2 up
ip link del br0
ip link add br0 type bridge && ip link set br0 up
ip link set br0 arp off
ip link set bond0 master br0 && ip link set bond0 up
ip link set swp0 master br0 && ip link set swp0 up
ip link set dev bond0 type bridge_slave flood off learning off
bridge fdb add dev bond0 <mac address of other eno0> master static

I'm noticing a problem in 'bridge fdb dump' with the 'self' entries, and
I didn't solve this. On Ocelot, an entry learned on a LAG is reported as
being on the first member port of it (so instead of saying 'self bond0',
it says 'self swp1'). This is better than not seeing the entry at all,
but when DSA queries for the FDBs on a port via ds->ops->port_fdb_dump,
it never queries for FDBs on a LAG. Not clear what we should do there,
we aren't in control of the ->ndo_fdb_dump of the bonding/team drivers.
Alternatively, we could just consider the 'self' entries reported via
ndo_fdb_dump as "better than nothing", and concentrate on the 'master'
entries that are in sync with the bridge when packets are flooded to
software.
====================

Link: https://lore.kernel.org/r/20220223140054.3379617-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 89183b6e 961d8b69
......@@ -1625,15 +1625,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
if (dp && dp->lag_dev) {
if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
* the LAG ID as the port number.
* the LAG ID (one-based) as the port number
* (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
port = dsa_lag_id(dst, dp->lag_dev);
port = dsa_port_lag_id_get(dp) - 1;
}
}
......@@ -1671,7 +1672,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (dsa_to_port(ds, port)->lag_dev)
if (dsa_to_port(ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
......@@ -6175,21 +6176,20 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct net_device *lag,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int id, members = 0;
int members = 0;
if (!mv88e6xxx_has_lag(chip))
return false;
id = dsa_lag_id(ds->dst, lag);
if (id < 0 || id >= ds->num_lag_ids)
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, lag)
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
......@@ -6209,20 +6209,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
id = dsa_lag_id(ds->dst, lag);
/* DSA LAG IDs are one-based, hardware is zero-based */
id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
dsa_lag_foreach_port(dp, ds->dst, lag)
dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
......@@ -6267,8 +6268,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
struct net_device *lag;
struct dsa_port *dp;
struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
......@@ -6277,8 +6278,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
list_for_each_entry(dp, &ds->dst->ports, list) {
if (!dp->lag_dev || dp->ds != ds)
dsa_switch_for_each_port(dp, ds) {
if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
......@@ -6291,7 +6292,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
lag = dsa_lag_dev(ds->dst, id);
lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
......@@ -6327,7 +6328,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
struct net_device *lag)
struct dsa_lag lag)
{
int err;
......@@ -6351,7 +6352,7 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct net_device *lag,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
......@@ -6360,7 +6361,8 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
id = dsa_lag_id(ds->dst, lag);
/* DSA LAG IDs are one-based */
id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
......@@ -6383,7 +6385,7 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
struct net_device *lag)
struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
......@@ -6408,7 +6410,7 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag,
int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
......@@ -6431,7 +6433,7 @@ static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag)
int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
......
......@@ -614,6 +614,22 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
return ocelot_fdb_del(ocelot, port, addr, vid);
}
/* DSA ->lag_fdb_add method: offload an FDB entry pointing towards a LAG.
 * Thin wrapper that hands the LAG's bonding netdev to the ocelot library.
 */
static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid)
{
struct ocelot *ocelot = ds->priv;
return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid);
}
/* DSA ->lag_fdb_del method: remove an FDB entry previously offloaded
 * towards a LAG. Mirrors felix_lag_fdb_add().
 */
static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid)
{
struct ocelot *ocelot = ds->priv;
return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
......@@ -677,20 +693,20 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
}
static int felix_lag_join(struct dsa_switch *ds, int port,
struct net_device *bond,
struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_port_lag_join(ocelot, port, bond, info);
return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
struct net_device *bond)
struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
ocelot_port_lag_leave(ocelot, port, bond);
ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}
......@@ -1579,6 +1595,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
.lag_fdb_add = felix_lag_fdb_add,
.lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
......
......@@ -2646,18 +2646,16 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
}
static bool
qca8k_lag_can_offload(struct dsa_switch *ds,
struct net_device *lag,
qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
int id, members = 0;
int members = 0;
id = dsa_lag_id(ds->dst, lag);
if (id < 0 || id >= ds->num_lag_ids)
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, lag)
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
......@@ -2675,16 +2673,14 @@ qca8k_lag_can_offload(struct dsa_switch *ds,
}
static int
qca8k_lag_setup_hash(struct dsa_switch *ds,
struct net_device *lag,
qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
unsigned int i;
u32 hash = 0;
int i, id;
id = dsa_lag_id(ds->dst, lag);
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
......@@ -2701,7 +2697,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
if (i != id && dsa_lag_dev(ds->dst, i)) {
if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
......@@ -2716,7 +2712,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
if (unique_lag) {
priv->lag_hash_mode = hash;
} else if (priv->lag_hash_mode != hash) {
netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
return -EOPNOTSUPP;
}
......@@ -2726,13 +2722,14 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
struct net_device *lag, bool delete)
struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
id = dsa_lag_id(ds->dst, lag);
/* DSA LAG IDs are one-based, hardware is zero-based */
id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
......@@ -2794,8 +2791,7 @@ qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
static int
qca8k_port_lag_join(struct dsa_switch *ds, int port,
struct net_device *lag,
qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
......@@ -2812,7 +2808,7 @@ qca8k_port_lag_join(struct dsa_switch *ds, int port,
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct net_device *lag)
struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
......
......@@ -419,6 +419,9 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
/* We don't offload uppers such as LAG as bridge ports, so every device except
* the bridge itself is foreign.
*/
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
......@@ -426,10 +429,10 @@ static bool lan966x_foreign_dev_check(const struct net_device *dev,
struct lan966x *lan966x = port->lan966x;
if (netif_is_bridge_master(foreign_dev))
if (lan966x->bridge != foreign_dev)
return true;
if (lan966x->bridge == foreign_dev)
return false;
return false;
return true;
}
static int lan966x_switchdev_event(struct notifier_block *nb,
......@@ -449,8 +452,7 @@ static int lan966x_switchdev_event(struct notifier_block *nb,
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
lan966x_netdevice_check,
lan966x_foreign_dev_check,
lan966x_handle_fdb,
NULL);
lan966x_handle_fdb);
return notifier_from_errno(err);
}
......
......@@ -1907,6 +1907,8 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
u32 mask = 0;
int port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
......@@ -1920,6 +1922,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
return mask;
}
/* The logical port number of a LAG is equal to the lowest numbered physical
* port ID present in that LAG. It may change if that port ever leaves the LAG.
*/
static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
int bond_mask = ocelot_get_bond_mask(ocelot, bond);
/* An empty mask means no port currently offloads this bond */
if (!bond_mask)
return -ENOENT;
/* Lowest set bit == lowest-numbered member port == logical port ID */
return __ffs(bond_mask);
}
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
......@@ -2413,7 +2428,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
int lag = ocelot_bond_get_id(ocelot, bond);
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
......@@ -2428,6 +2443,46 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
}
}
/* Documentation for PORTID_VAL says:
* Logical port number for front port. If port is not a member of a LLAG,
* then PORTID must be set to the physical port number.
* If port is a member of a LLAG, then PORTID must be set to the common
* PORTID_VAL used for all member ports of the LLAG.
* The value must not exceed the number of physical ports on the device.
*
* This means we have little choice but to migrate FDB entries pointing towards
* a logical port when that changes.
*/
/* Re-point all FDB entries installed on @bond from their old logical port
 * to the new logical port ID @lag, by forgetting and re-learning each MAC
 * table entry. Errors are only logged (best effort): there is no sane way
 * to roll back a partial migration here.
 */
static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
struct net_device *bond,
int lag)
{
struct ocelot_lag_fdb *fdb;
int err;
/* Caller must serialize against other fwd_domain_lock users */
lockdep_assert_held(&ocelot->fwd_domain_lock);
list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
if (fdb->bond != bond)
continue;
err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
if (err) {
dev_err(ocelot->dev,
"failed to delete LAG %s FDB %pM vid %d: %pe\n",
bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
}
err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
ENTRYTYPE_LOCKED);
if (err) {
dev_err(ocelot->dev,
"failed to migrate LAG %s FDB %pM vid %d: %pe\n",
bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
}
}
}
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
struct netdev_lag_upper_info *info)
......@@ -2452,14 +2507,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
/* Remove @port from @bond. Because the LAG's logical port ID is the lowest
 * numbered member port, the ID may change when that port leaves; in that
 * case, migrate the LAG's FDB entries to the new logical port. A negative
 * new_lag_id means the bond has no offloaded members left.
 */
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
int old_lag_id, new_lag_id;
mutex_lock(&ocelot->fwd_domain_lock);
/* Snapshot the logical port ID before membership changes */
old_lag_id = ocelot_bond_get_id(ocelot, bond);
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
new_lag_id = ocelot_bond_get_id(ocelot, bond);
if (new_lag_id >= 0 && old_lag_id != new_lag_id)
ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
......@@ -2468,13 +2532,74 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->lag_tx_active = lag_tx_active;
/* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
/* Install an FDB entry on the logical port of @bond, and record it in
 * ocelot->lag_fdbs so it can be migrated if the LAG's logical port ID
 * changes later (see ocelot_migrate_lag_fdbs()).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * the MAC table write.
 */
int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid)
{
struct ocelot_lag_fdb *fdb;
int lag, err;
/* Allocate the tracking entry before taking the lock */
fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
if (!fdb)
return -ENOMEM;
ether_addr_copy(fdb->addr, addr);
fdb->vid = vid;
fdb->bond = bond;
mutex_lock(&ocelot->fwd_domain_lock);
lag = ocelot_bond_get_id(ocelot, bond);
/* ENTRYTYPE_LOCKED: presumably a static (non-aging) entry — the same
 * type used when migrating LAG FDBs
 */
err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
if (err) {
mutex_unlock(&ocelot->fwd_domain_lock);
kfree(fdb);
return err;
}
list_add_tail(&fdb->list, &ocelot->lag_fdbs);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
/* Remove an FDB entry previously installed on @bond via ocelot_lag_fdb_add():
 * forget it in the MAC table and drop it from the lag_fdbs tracking list.
 *
 * Returns 0 if a matching entry was found, -ENOENT otherwise.
 */
int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid)
{
struct ocelot_lag_fdb *fdb, *tmp;
mutex_lock(&ocelot->fwd_domain_lock);
/* _safe variant because we delete the matching node inside the loop */
list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
fdb->bond != bond)
continue;
ocelot_mact_forget(ocelot, addr, vid);
list_del(&fdb->list);
mutex_unlock(&ocelot->fwd_domain_lock);
kfree(fdb);
return 0;
}
mutex_unlock(&ocelot->fwd_domain_lock);
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
......@@ -2769,6 +2894,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
INIT_LIST_HEAD(&ocelot->vlans);
INIT_LIST_HEAD(&ocelot->lag_fdbs);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
......
......@@ -116,6 +116,14 @@ struct dsa_netdevice_ops {
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
/* DSA's view of an offloaded LAG: ties the bonding/team netdev to a
 * one-based linear ID and the FDB entries offloaded on it. Shared
 * (refcounted) between all member ports of the same LAG.
 */
struct dsa_lag {
struct net_device *dev; /* the bonding/team master netdev */
unsigned int id; /* one-based LAG ID; 0 when unmapped */
struct mutex fdb_lock; /* protects @fdbs */
struct list_head fdbs; /* FDB entries offloaded on this LAG */
refcount_t refcount; /* one reference per member port */
};
struct dsa_switch_tree {
struct list_head list;
......@@ -134,7 +142,7 @@ struct dsa_switch_tree {
/* Maps offloaded LAG netdevs to a zero-based linear ID for
* drivers that need it.
*/
struct net_device **lags;
struct dsa_lag **lags;
/* Tagging protocol operations */
const struct dsa_device_ops *tag_ops;
......@@ -163,32 +171,36 @@ struct dsa_switch_tree {
unsigned int last_switch;
};
/* LAG IDs are one-based, the dst->lags array is zero-based */
#define dsa_lags_foreach_id(_id, _dst) \
for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \
if ((_dst)->lags[(_id)])
for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \
if ((_dst)->lags[(_id) - 1])
#define dsa_lag_foreach_port(_dp, _dst, _lag) \
list_for_each_entry((_dp), &(_dst)->ports, list) \
if ((_dp)->lag_dev == (_lag))
if (dsa_port_offloads_lag((_dp), (_lag)))
#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
unsigned int id)
static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
unsigned int id)
{
return dst->lags[id];
/* DSA LAG IDs are one-based, dst->lags is zero-based */
return dst->lags[id - 1];
}
static inline int dsa_lag_id(struct dsa_switch_tree *dst,
struct net_device *lag)
struct net_device *lag_dev)
{
unsigned int id;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_dev(dst, id) == lag)
return id;
struct dsa_lag *lag = dsa_lag_by_id(dst, id);
if (lag->dev == lag_dev)
return lag->id;
}
return -ENODEV;
......@@ -291,7 +303,7 @@ struct dsa_port {
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
struct net_device *lag_dev;
struct dsa_lag *lag;
struct net_device *hsr_dev;
struct list_head list;
......@@ -641,14 +653,30 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
return dp->vlan_filtering;
}
/* Return the one-based ID of the LAG that @dp offloads, or 0 if the port
 * is not part of a hardware-offloaded LAG.
 */
static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
{
	if (!dp->lag)
		return 0;

	return dp->lag->id;
}
/* Return the LAG netdev that @dp offloads, or NULL if the port is not
 * part of a hardware-offloaded LAG.
 */
static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
{
	if (dp->lag)
		return dp->lag->dev;

	return NULL;
}
/* True if @dp is a member of the offloaded LAG described by @lag,
 * i.e. both refer to the same LAG netdev.
 */
static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
					 const struct dsa_lag *lag)
{
	struct net_device *lag_dev = dsa_port_lag_dev_get(dp);

	return lag_dev == lag->dev;
}
static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
if (!dp->bridge)
return NULL;
if (dp->lag_dev)
return dp->lag_dev;
if (dp->lag)
return dp->lag->dev;
else if (dp->hsr_dev)
return dp->hsr_dev;
......@@ -918,6 +946,10 @@ struct dsa_switch_ops {
const unsigned char *addr, u16 vid);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid);
int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
const unsigned char *addr, u16 vid);
/*
* Multicast database
......@@ -966,10 +998,10 @@ struct dsa_switch_ops {
int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
int port);
int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag,
int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
int port, struct net_device *lag);
int port, struct dsa_lag lag);
/*
* PTP functionality
......@@ -1041,10 +1073,10 @@ struct dsa_switch_ops {
*/
int (*port_lag_change)(struct dsa_switch *ds, int port);
int (*port_lag_join)(struct dsa_switch *ds, int port,
struct net_device *lag,
struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*port_lag_leave)(struct dsa_switch *ds, int port,
struct net_device *lag);
struct dsa_lag lag);
/*
* HSR integration
......
......@@ -313,10 +313,7 @@ int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long e
const struct net_device *foreign_dev),
int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info));
const struct switchdev_notifier_fdb_info *fdb_info));
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
......@@ -443,10 +440,7 @@ switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event
const struct net_device *foreign_dev),
int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info))
const struct switchdev_notifier_fdb_info *fdb_info))
{
return 0;
}
......
......@@ -635,6 +635,13 @@ enum macaccess_entry_type {
#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
/* Bookkeeping for an FDB entry installed on a LAG's logical port, kept on
 * ocelot->lag_fdbs so the entry can be re-installed when the LAG's logical
 * port ID changes.
 */
struct ocelot_lag_fdb {
unsigned char addr[ETH_ALEN]; /* MAC address of the entry */
u16 vid; /* VLAN the entry belongs to */
struct net_device *bond; /* LAG netdev the entry points to */
struct list_head list; /* node in ocelot->lag_fdbs */
};
struct ocelot_port {
struct ocelot *ocelot;
......@@ -690,6 +697,7 @@ struct ocelot {
struct list_head vlans;
struct list_head traps;
struct list_head lag_fdbs;
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
......@@ -866,6 +874,10 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid);
int ocelot_fdb_del(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid);
int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid);
int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
......
......@@ -72,27 +72,24 @@ int dsa_broadcast(unsigned long e, void *v)
}
/**
* dsa_lag_map() - Map LAG netdev to a linear LAG ID
* dsa_lag_map() - Map LAG structure to a linear LAG array
* @dst: Tree in which to record the mapping.
* @lag: Netdev that is to be mapped to an ID.
* @lag: LAG structure that is to be mapped to the tree's array.
*
* dsa_lag_id/dsa_lag_dev can then be used to translate between the
* dsa_lag_id/dsa_lag_by_id can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
if (dsa_lag_id(dst, lag) >= 0)
/* Already mapped */
return;
for (id = 0; id < dst->lags_len; id++) {
if (!dsa_lag_dev(dst, id)) {
dst->lags[id] = lag;
for (id = 1; id <= dst->lags_len; id++) {
if (!dsa_lag_by_id(dst, id)) {
dst->lags[id - 1] = lag;
lag->id = id;
return;
}
}
......@@ -108,28 +105,36 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
* @lag: Netdev that was mapped.
* @lag: LAG structure that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
struct dsa_port *dp;
unsigned int id;
dsa_lag_foreach_port(dp, dst, lag)
/* There are remaining users of this mapping */
return;
dsa_lags_foreach_id(id, dst) {
if (dsa_lag_dev(dst, id) == lag) {
dst->lags[id] = NULL;
if (dsa_lag_by_id(dst, id) == lag) {
dst->lags[id - 1] = NULL;
lag->id = 0;
break;
}
}
}
/* Look up the dsa_lag structure associated with @lag_dev by scanning the
 * tree's ports for a member of that LAG. Returns NULL if no port in @dst
 * currently offloads @lag_dev.
 */
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_lag_dev_get(dp) == lag_dev)
return dp->lag;
return NULL;
}
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
const struct net_device *br)
{
......
......@@ -25,6 +25,8 @@ enum {
DSA_NOTIFIER_FDB_DEL,
DSA_NOTIFIER_HOST_FDB_ADD,
DSA_NOTIFIER_HOST_FDB_DEL,
DSA_NOTIFIER_LAG_FDB_ADD,
DSA_NOTIFIER_LAG_FDB_DEL,
DSA_NOTIFIER_LAG_CHANGE,
DSA_NOTIFIER_LAG_JOIN,
DSA_NOTIFIER_LAG_LEAVE,
......@@ -67,6 +69,13 @@ struct dsa_notifier_fdb_info {
u16 vid;
};
/* DSA_NOTIFIER_LAG_FDB_* */
/* Payload for DSA_NOTIFIER_LAG_FDB_ADD / DSA_NOTIFIER_LAG_FDB_DEL */
struct dsa_notifier_lag_fdb_info {
struct dsa_lag *lag; /* LAG the FDB entry points to */
const unsigned char *addr; /* MAC address; not owned by this struct */
u16 vid; /* VLAN of the entry */
};
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
const struct switchdev_obj_port_mdb *mdb;
......@@ -76,7 +85,7 @@ struct dsa_notifier_mdb_info {
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
struct net_device *lag;
struct dsa_lag lag;
int sw_index;
int port;
......@@ -119,9 +128,8 @@ struct dsa_notifier_master_state_info {
};
struct dsa_switchdev_event_work {
struct dsa_switch *ds;
int port;
struct net_device *dev;
struct net_device *orig_dev;
struct work_struct work;
unsigned long event;
/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
......@@ -215,6 +223,10 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
......@@ -487,8 +499,10 @@ int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
......
......@@ -429,7 +429,7 @@ int dsa_port_lag_change(struct dsa_port *dp,
};
bool tx_enabled;
if (!dp->lag_dev)
if (!dp->lag)
return 0;
/* On statically configured aggregates (e.g. loadbalance
......@@ -447,27 +447,70 @@ int dsa_port_lag_change(struct dsa_port *dp,
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
/* Associate @dp with the dsa_lag for @lag_dev. If another port in the tree
 * already offloads this LAG, share its dsa_lag and bump the refcount;
 * otherwise allocate, initialize and map a new one.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int dsa_port_lag_create(struct dsa_port *dp,
struct net_device *lag_dev)
{
struct dsa_switch *ds = dp->ds;
struct dsa_lag *lag;
lag = dsa_tree_lag_find(ds->dst, lag_dev);
if (lag) {
/* Existing LAG: just take a reference */
refcount_inc(&lag->refcount);
dp->lag = lag;
return 0;
}
lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!lag)
return -ENOMEM;
refcount_set(&lag->refcount, 1);
mutex_init(&lag->fdb_lock);
INIT_LIST_HEAD(&lag->fdbs);
lag->dev = lag_dev;
/* Assign a one-based linear LAG ID within the tree */
dsa_lag_map(ds->dst, lag);
dp->lag = lag;
return 0;
}
/* Drop @dp's reference on its dsa_lag. When the last member port leaves,
 * unmap the LAG ID and free the structure; any FDB entries still on the
 * list at that point were leaked by the caller, hence the WARN_ON.
 */
static void dsa_port_lag_destroy(struct dsa_port *dp)
{
struct dsa_lag *lag = dp->lag;
dp->lag = NULL;
dp->lag_tx_enabled = false;
if (!refcount_dec_and_test(&lag->refcount))
/* Other ports still reference this LAG */
return;
WARN_ON(!list_empty(&lag->fdbs));
dsa_lag_unmap(dp->ds->dst, lag);
kfree(lag);
}
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.lag = lag,
.info = uinfo,
};
struct net_device *bridge_dev;
int err;
dsa_lag_map(dp->ds->dst, lag);
dp->lag_dev = lag;
err = dsa_port_lag_create(dp, lag_dev);
if (err)
goto err_lag_create;
info.lag = *dp->lag;
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
if (err)
goto err_lag_join;
bridge_dev = netdev_master_upper_dev_get(lag);
bridge_dev = netdev_master_upper_dev_get(lag_dev);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return 0;
......@@ -480,12 +523,12 @@ int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
err_bridge_join:
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
dp->lag_dev = NULL;
dsa_lag_unmap(dp->ds->dst, lag);
dsa_port_lag_destroy(dp);
err_lag_create:
return err;
}
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
......@@ -493,17 +536,16 @@ void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
dsa_port_pre_bridge_leave(dp, br);
}
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev)
{
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
.lag = lag,
};
int err;
if (!dp->lag_dev)
if (!dp->lag)
return;
/* Port might have been part of a LAG that in turn was
......@@ -512,16 +554,15 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
if (br)
dsa_port_bridge_leave(dp, br);
dp->lag_tx_enabled = false;
dp->lag_dev = NULL;
info.lag = *dp->lag;
dsa_port_lag_destroy(dp);
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
dsa_lag_unmap(dp->ds->dst, lag);
}
/* Must be called under rcu_read_lock() */
......@@ -822,6 +863,30 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
/* Install a static FDB entry on the LAG that this port is a member of.
 * The request is fanned out to all switches in the tree through the
 * cross-chip notifier chain; only switches with at least one port in
 * the LAG will act on it.
 */
int dsa_port_lag_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info fdb_info = {
		.addr = addr,
		.vid = vid,
		.lag = dp->lag,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_ADD, &fdb_info);
}
/* Remove a static FDB entry from the LAG that this port is a member of.
 * Mirrors dsa_port_lag_fdb_add(); the deletion is propagated via the
 * cross-chip notifier chain.
 */
int dsa_port_lag_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			 u16 vid)
{
	struct dsa_notifier_lag_fdb_info fdb_info = {
		.addr = addr,
		.vid = vid,
		.lag = dp->lag,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_FDB_DEL, &fdb_info);
}
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
struct dsa_switch *ds = dp->ds;
......
......@@ -2134,7 +2134,7 @@ dsa_slave_lag_changeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag_dev)
if (!dp->lag)
/* Software LAG */
continue;
......@@ -2163,7 +2163,7 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
continue;
dp = dsa_slave_to_port(lower);
if (!dp->lag_dev)
if (!dp->lag)
/* Software LAG */
continue;
......@@ -2373,35 +2373,34 @@ static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
struct switchdev_notifier_fdb_info info = {};
struct dsa_switch *ds = switchdev_work->ds;
struct dsa_port *dp;
if (!dsa_is_user_port(ds, switchdev_work->port))
return;
info.addr = switchdev_work->addr;
info.vid = switchdev_work->vid;
info.offloaded = true;
dp = dsa_to_port(ds, switchdev_work->port);
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
dp->slave, &info.info, NULL);
switchdev_work->orig_dev, &info.info, NULL);
}
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
struct dsa_switchdev_event_work *switchdev_work =
container_of(work, struct dsa_switchdev_event_work, work);
struct dsa_switch *ds = switchdev_work->ds;
struct net_device *dev = switchdev_work->dev;
struct dsa_switch *ds;
struct dsa_port *dp;
int err;
dp = dsa_to_port(ds, switchdev_work->port);
dp = dsa_slave_to_port(dev);
ds = dp->ds;
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (switchdev_work->host_addr)
err = dsa_port_host_fdb_add(dp, switchdev_work->addr,
switchdev_work->vid);
else if (dp->lag)
err = dsa_port_lag_fdb_add(dp, switchdev_work->addr,
switchdev_work->vid);
else
err = dsa_port_fdb_add(dp, switchdev_work->addr,
switchdev_work->vid);
......@@ -2419,6 +2418,9 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
if (switchdev_work->host_addr)
err = dsa_port_host_fdb_del(dp, switchdev_work->addr,
switchdev_work->vid);
else if (dp->lag)
err = dsa_port_lag_fdb_del(dp, switchdev_work->addr,
switchdev_work->vid);
else
err = dsa_port_fdb_del(dp, switchdev_work->addr,
switchdev_work->vid);
......@@ -2464,19 +2466,17 @@ static int dsa_slave_fdb_event(struct net_device *dev,
if (ctx && ctx != dp)
return 0;
if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
return -EOPNOTSUPP;
if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
if (dsa_port_offloads_bridge_port(dp, orig_dev))
return 0;
if (dsa_slave_dev_check(orig_dev) &&
switchdev_fdb_is_dynamically_learned(fdb_info))
return 0;
/* FDB entries learned by the software bridge should be installed as
* host addresses only if the driver requests assisted learning.
*/
if (switchdev_fdb_is_dynamically_learned(fdb_info) &&
!ds->assisted_learning_on_cpu_port)
return 0;
/* FDB entries learned by the software bridge or by foreign
* bridge ports should be installed as host addresses only if
* the driver requests assisted learning.
*/
if (!ds->assisted_learning_on_cpu_port)
return 0;
}
/* Also treat FDB entries on foreign interfaces bridged with us as host
* addresses.
......@@ -2484,6 +2484,18 @@ static int dsa_slave_fdb_event(struct net_device *dev,
if (dsa_foreign_dev_check(dev, orig_dev))
host_addr = true;
/* Check early that we're not doing work in vain.
* Host addresses on LAG ports still require regular FDB ops,
* since the CPU port isn't in a LAG.
*/
if (dp->lag && !host_addr) {
if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
return -EOPNOTSUPP;
} else {
if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
return -EOPNOTSUPP;
}
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return -ENOMEM;
......@@ -2494,10 +2506,9 @@ static int dsa_slave_fdb_event(struct net_device *dev,
host_addr ? " as host address" : "");
INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
switchdev_work->ds = ds;
switchdev_work->port = dp->index;
switchdev_work->event = event;
switchdev_work->dev = dev;
switchdev_work->orig_dev = orig_dev;
ether_addr_copy(switchdev_work->addr, fdb_info->addr);
switchdev_work->vid = fdb_info->vid;
......@@ -2526,8 +2537,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
dsa_slave_dev_check,
dsa_foreign_dev_check,
dsa_slave_fdb_event,
NULL);
dsa_slave_fdb_event);
return notifier_from_errno(err);
default:
return NOTIFY_DONE;
......
......@@ -385,6 +385,75 @@ static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
return err;
}
/* Reference-counted installation of an FDB entry on a LAG.
 * Multiple callers (e.g. several bridged ports of the same LAG) may
 * request the same {addr, vid}; the hardware op is issued only for the
 * first request, subsequent ones just bump the refcount. Serialized by
 * lag->fdb_lock.
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_mac_addr *entry;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	entry = dsa_mac_addr_find(&lag->fdbs, addr, vid);
	if (entry) {
		/* Already programmed in hardware; just take a reference */
		refcount_inc(&entry->refcount);
	} else {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
		} else {
			err = ds->ops->lag_fdb_add(ds, *lag, addr, vid);
			if (err) {
				/* Hardware rejected it; don't track it */
				kfree(entry);
			} else {
				ether_addr_copy(entry->addr, addr);
				entry->vid = vid;
				refcount_set(&entry->refcount, 1);
				list_add_tail(&entry->list, &lag->fdbs);
			}
		}
	}

	mutex_unlock(&lag->fdb_lock);

	return err;
}
/* Reference-counted removal of an FDB entry from a LAG.
 * The hardware op runs only when the last reference is dropped. If the
 * hardware deletion fails, the reference is restored so a later retry
 * remains possible. Serialized by lag->fdb_lock.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_mac_addr *entry;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	entry = dsa_mac_addr_find(&lag->fdbs, addr, vid);
	if (!entry) {
		/* Deleting something we never added (or already removed) */
		err = -ENOENT;
	} else if (refcount_dec_and_test(&entry->refcount)) {
		/* Last user gone: remove from hardware and from tracking */
		err = ds->ops->lag_fdb_del(ds, *lag, addr, vid);
		if (err) {
			/* Keep the entry alive so the caller can retry */
			refcount_set(&entry->refcount, 1);
		} else {
			list_del(&entry->list);
			kfree(entry);
		}
	}

	mutex_unlock(&lag->fdb_lock);

	return err;
}
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_fdb_info *info)
{
......@@ -451,6 +520,40 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds,
return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}
static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
struct dsa_notifier_lag_fdb_info *info)
{
struct dsa_port *dp;
if (!ds->ops->lag_fdb_add)
return -EOPNOTSUPP;
/* Notify switch only if it has a port in this LAG */
dsa_switch_for_each_port(dp, ds)
if (dsa_port_offloads_lag(dp, info->lag))
return dsa_switch_do_lag_fdb_add(ds, info->lag,
info->addr, info->vid);
return 0;
}
static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
struct dsa_notifier_lag_fdb_info *info)
{
struct dsa_port *dp;
if (!ds->ops->lag_fdb_del)
return -EOPNOTSUPP;
/* Notify switch only if it has a port in this LAG */
dsa_switch_for_each_port(dp, ds)
if (dsa_port_offloads_lag(dp, info->lag))
return dsa_switch_do_lag_fdb_del(ds, info->lag,
info->addr, info->vid);
return 0;
}
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
......@@ -904,6 +1007,12 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_HOST_FDB_DEL:
err = dsa_switch_host_fdb_del(ds, info);
break;
case DSA_NOTIFIER_LAG_FDB_ADD:
err = dsa_switch_lag_fdb_add(ds, info);
break;
case DSA_NOTIFIER_LAG_FDB_DEL:
err = dsa_switch_lag_fdb_del(ds, info);
break;
case DSA_NOTIFIER_LAG_CHANGE:
err = dsa_switch_lag_change(ds, info);
break;
......
......@@ -246,12 +246,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
if (trunk) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_lag *lag;
/* The exact source port is not available in the tag,
* so we inject the frame directly on the upper
* team/bond.
*/
skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
skb->dev = lag ? lag->dev : NULL;
} else {
skb->dev = dsa_master_find_slave(dev, source_device,
source_port);
......
......@@ -458,63 +458,40 @@ static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
const struct net_device *foreign_dev),
int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info))
const struct switchdev_notifier_fdb_info *fdb_info))
{
const struct switchdev_notifier_info *info = &fdb_info->info;
struct net_device *br, *lower_dev;
struct net_device *br, *lower_dev, *switchdev;
struct list_head *iter;
int err = -EOPNOTSUPP;
if (check_cb(dev))
return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
if (netif_is_lag_master(dev)) {
if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
goto maybe_bridged_with_us;
/* This is a LAG interface that we offload */
if (!lag_mod_cb)
return -EOPNOTSUPP;
return lag_mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
}
/* Recurse through lower interfaces in case the FDB entry is pointing
* towards a bridge device.
* towards a bridge or a LAG device.
*/
if (netif_is_bridge_master(dev)) {
if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
return 0;
/* This is a bridge interface that we offload */
netdev_for_each_lower_dev(dev, lower_dev, iter) {
/* Do not propagate FDB entries across bridges */
if (netif_is_bridge_master(lower_dev))
continue;
/* Bridge ports might be either us, or LAG interfaces
* that we offload.
*/
if (!check_cb(lower_dev) &&
!switchdev_lower_dev_find_rcu(lower_dev, check_cb,
foreign_dev_check_cb))
continue;
err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
event, fdb_info, check_cb,
foreign_dev_check_cb,
mod_cb, lag_mod_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
netdev_for_each_lower_dev(dev, lower_dev, iter) {
/* Do not propagate FDB entries across bridges */
if (netif_is_bridge_master(lower_dev))
continue;
return 0;
/* Bridge ports might be either us, or LAG interfaces
* that we offload.
*/
if (!check_cb(lower_dev) &&
!switchdev_lower_dev_find_rcu(lower_dev, check_cb,
foreign_dev_check_cb))
continue;
err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
event, fdb_info, check_cb,
foreign_dev_check_cb,
mod_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
maybe_bridged_with_us:
/* Event is neither on a bridge nor a LAG. Check whether it is on an
* interface that is in a bridge with us.
*/
......@@ -522,12 +499,16 @@ static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
if (!br || !netif_is_bridge_master(br))
return 0;
if (!switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb))
switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
if (!switchdev)
return 0;
if (!foreign_dev_check_cb(switchdev, dev))
return err;
return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
check_cb, foreign_dev_check_cb,
mod_cb, lag_mod_cb);
mod_cb);
}
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
......@@ -537,16 +518,13 @@ int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long e
const struct net_device *foreign_dev),
int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info),
int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev,
unsigned long event, const void *ctx,
const struct switchdev_notifier_fdb_info *fdb_info))
const struct switchdev_notifier_fdb_info *fdb_info))
{
int err;
err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
check_cb, foreign_dev_check_cb,
mod_cb, lag_mod_cb);
mod_cb);
if (err == -EOPNOTSUPP)
err = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment