Commit 4098ced4 authored by David S. Miller's avatar David S. Miller

Merge branch 'brport-flags'

Vladimir Oltean says:

====================
Cleanup in brport flags switchdev offload for DSA

The initial goal of this series was to have better support for
standalone ports mode on the DSA drivers like ocelot/felix and sja1105.
This turned out to require some API adjustments in both directions:
to the information presented to and by the switchdev notifier, and to
the API presented to the switch drivers by the DSA layer.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents b0aae0bd 4d942354
......@@ -510,6 +510,39 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
}
EXPORT_SYMBOL(b53_imp_vlan_setup);
/* Enable or disable flooding of unknown unicast towards @port.
 *
 * The B53_UC_FLOOD_MASK register holds one bit per port; a set bit means
 * frames with an unknown unicast DA are flooded out that port.
 */
static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
				     bool unicast)
{
	u16 mask;

	/* Read-modify-write so only this port's bit changes */
	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &mask);
	mask &= ~BIT(port);
	if (unicast)
		mask |= BIT(port);
	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, mask);
}
/* Enable or disable flooding of unknown multicast towards @port.
 *
 * Unknown L2 multicast and unknown IP multicast have separate per-port
 * flood mask registers; both are kept in sync with @multicast.
 */
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
				     bool multicast)
{
	static const u8 flood_regs[] = {
		B53_MC_FLOOD_MASK,
		B53_IPMC_FLOOD_MASK,
	};
	unsigned int i;
	u16 mask;

	for (i = 0; i < 2; i++) {
		/* Read-modify-write so only this port's bit changes */
		b53_read16(dev, B53_CTRL_PAGE, flood_regs[i], &mask);
		mask &= ~BIT(port);
		if (multicast)
			mask |= BIT(port);
		b53_write16(dev, B53_CTRL_PAGE, flood_regs[i], mask);
	}
}
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
......@@ -522,7 +555,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
b53_br_egress_floods(ds, port, true, true);
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
......@@ -655,7 +689,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_brcm_hdr_setup(dev->ds, port);
b53_br_egress_floods(dev->ds, port, true, true);
b53_port_set_ucast_flood(dev, port, true);
b53_port_set_mcast_flood(dev, port, true);
}
static void b53_enable_mib(struct b53_device *dev)
......@@ -1916,37 +1951,37 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_br_fast_age);
int b53_br_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast)
static int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 uc, mc;
b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
if (unicast)
uc |= BIT(port);
else
uc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
if (multicast)
mc |= BIT(port);
else
mc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
return 0;
}
b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
if (multicast)
mc |= BIT(port);
else
mc &= ~BIT(port);
b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
/* .port_bridge_flags handler: apply the bridge port properties selected in
 * flags.mask, taking the desired on/off state from flags.val.
 */
static int b53_br_flags(struct dsa_switch *ds, int port,
			struct switchdev_brport_flags flags,
			struct netlink_ext_ack *extack)
{
	struct b53_device *dev = ds->priv;

	if (flags.mask & BR_FLOOD)
		b53_port_set_ucast_flood(dev, port,
					 !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_MCAST_FLOOD)
		b53_port_set_mcast_flood(dev, port,
					 !!(flags.val & BR_MCAST_FLOOD));

	return 0;
}
/* Offload the multicast router state of a bridge port.
 *
 * A multicast router port must receive all multicast traffic, so it is
 * modeled with the same hardware knob as BR_MCAST_FLOOD: unknown
 * multicast is flooded towards @port iff @mrouter is true.
 */
static int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
			   struct netlink_ext_ack *extack)
{
	b53_port_set_mcast_flood(ds->priv, port, mrouter);

	return 0;
}
EXPORT_SYMBOL(b53_br_egress_floods);
static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
......@@ -2187,9 +2222,11 @@ static const struct dsa_switch_ops b53_switch_ops = {
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
.port_bridge_flags = b53_br_flags,
.port_set_mrouter = b53_set_mrouter,
.port_stp_state_set = b53_br_set_stp_state,
.port_fast_age = b53_br_fast_age,
.port_egress_floods = b53_br_egress_floods,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_add = b53_vlan_add,
.port_vlan_del = b53_vlan_del,
......
......@@ -326,8 +326,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
......
This diff is collapsed.
......@@ -454,8 +454,10 @@ struct mv88e6xxx_ops {
int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
bool unicast, bool multicast);
int (*port_set_ucast_flood)(struct mv88e6xxx_chip *chip, int port,
bool unicast);
int (*port_set_mcast_flood)(struct mv88e6xxx_chip *chip, int port,
bool multicast);
int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
u16 etype);
int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
......
......@@ -789,8 +789,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
int port, bool unicast)
int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
int port, bool unicast)
{
int err;
u16 reg;
......@@ -807,8 +807,8 @@ static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
bool unicast, bool multicast)
int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
bool unicast)
{
int err;
u16 reg;
......@@ -817,16 +817,28 @@ int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK;
if (unicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
else
reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
bool multicast)
{
int err;
u16 reg;
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
if (err)
return err;
if (unicast && multicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA;
else if (unicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
else if (multicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
if (multicast)
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
else
reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA;
reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
}
......@@ -1013,8 +1025,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
};
static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
int port, bool multicast)
int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
int port, bool multicast)
{
int err;
u16 reg;
......@@ -1031,18 +1043,6 @@ static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
/* Configure egress flooding of unknown unicast and unknown multicast on
 * @port by combining the two separate mv88e6185 per-port controls.
 *
 * Returns 0 on success or a negative error code from the register access.
 */
int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
				     bool unicast, bool multicast)
{
	int err;

	/* Unknown unicast flooding (FORWARD_UNKNOWN control) */
	err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
	if (err)
		return err;

	/* Unknown multicast flooding (DEFAULT_FORWARD control) */
	return mv88e6185_port_set_default_forward(chip, port, multicast);
}
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port)
{
......
......@@ -154,11 +154,8 @@
#define MV88E6185_PORT_CTL0_USE_IP 0x0020
#define MV88E6185_PORT_CTL0_USE_TAG 0x0010
#define MV88E6185_PORT_CTL0_FORWARD_UNKNOWN 0x0004
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK 0x000c
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA 0x0000
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA 0x0004
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA 0x0008
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA 0x000c
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC 0x0004
#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC 0x0008
#define MV88E6XXX_PORT_CTL0_STATE_MASK 0x0003
#define MV88E6XXX_PORT_CTL0_STATE_DISABLED 0x0000
#define MV88E6XXX_PORT_CTL0_STATE_BLOCKING 0x0001
......@@ -343,10 +340,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode mode);
int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
bool unicast, bool multicast);
int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
bool unicast, bool multicast);
int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
int port, bool unicast);
int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
int port, bool multicast);
int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
bool unicast);
int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
bool multicast);
int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_policy_mapping mapping,
enum mv88e6xxx_policy_action action);
......
......@@ -299,6 +299,7 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
GFP_KERNEL);
......@@ -411,6 +412,8 @@ static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
*/
cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
return 0;
}
......@@ -553,6 +556,26 @@ static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
return ocelot_bridge_stp_state_set(ocelot, port, state);
}
/* .port_pre_bridge_flags handler: validate that the requested bridge port
 * flags can be offloaded. Validation is common to all ocelot-family
 * switches, so defer to the shared library code.
 */
static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
				  struct switchdev_brport_flags val,
				  struct netlink_ext_ack *extack)
{
	return ocelot_port_pre_bridge_flags(ds->priv, port, val);
}
/* .port_bridge_flags handler: apply the validated bridge port flags.
 * The shared ocelot helper returns void, so this cannot fail.
 */
static int felix_bridge_flags(struct dsa_switch *ds, int port,
			      struct switchdev_brport_flags val,
			      struct netlink_ext_ack *extack)
{
	ocelot_port_bridge_flags(ds->priv, port, val);

	return 0;
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
......@@ -1373,6 +1396,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_fdb_del = felix_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
.port_bridge_flags = felix_bridge_flags,
.port_bridge_join = felix_bridge_join,
.port_bridge_leave = felix_bridge_leave,
.port_lag_join = felix_lag_join,
......
......@@ -94,6 +94,7 @@ struct sja1105_info {
* pop it when it's equal to TPID2.
*/
u16 qinq_tpid;
bool can_limit_mcast_flood;
int (*reset_cmd)(struct dsa_switch *ds);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
......@@ -204,6 +205,7 @@ struct sja1105_private {
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_NUM_PORTS];
bool best_effort_vlan_filtering;
unsigned long learn_ena;
const struct sja1105_info *info;
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
......
......@@ -25,6 +25,8 @@
#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
static const struct dsa_switch_ops sja1105_switch_ops;
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
......@@ -42,15 +44,10 @@ static void
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
int from, int to, bool allow)
{
if (allow) {
l2_fwd[from].bc_domain |= BIT(to);
if (allow)
l2_fwd[from].reach_port |= BIT(to);
l2_fwd[from].fl_domain |= BIT(to);
} else {
l2_fwd[from].bc_domain &= ~BIT(to);
else
l2_fwd[from].reach_port &= ~BIT(to);
l2_fwd[from].fl_domain &= ~BIT(to);
}
}
/* Structure used to temporarily transport device tree
......@@ -220,17 +217,43 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
/* Build the static FDB table of the switch.
 *
 * We only populate the FDB table through dynamic L2 Address Lookup
 * entries, except for a special entry at the end which is a catch-all
 * for unknown multicast and will be used to control flooding domain.
 *
 * Returns 0 on success, -ENOMEM if the catch-all entry cannot be
 * allocated.
 */
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int port;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* Drop any previously built static table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Chips that cannot limit multicast flooding have no use for the
	 * catch-all entry, so leave the table empty for them.
	 */
	if (!priv->info->can_limit_mcast_flood)
		return 0;

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;
	l2_lookup = table->entries;

	/* All L2 multicast addresses have an odd first octet */
	l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].lockeds = true;
	l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;

	/* Flood multicast to every port by default */
	for (port = 0; port < priv->ds->num_ports; port++)
		if (!dsa_is_unused_port(priv->ds, port))
			l2_lookup[0].destports |= BIT(port);

	return 0;
}
......@@ -390,6 +413,12 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
sja1105_port_allow_traffic(l2fwd, i, upstream, true);
sja1105_port_allow_traffic(l2fwd, upstream, i, true);
l2fwd[i].bc_domain = BIT(upstream);
l2fwd[i].fl_domain = BIT(upstream);
l2fwd[upstream].bc_domain |= BIT(i);
l2fwd[upstream].fl_domain |= BIT(i);
}
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
......@@ -1514,6 +1543,12 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
*/
if (!(l2_lookup.destports & BIT(port)))
continue;
/* We need to hide the FDB entry for unknown multicast */
if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
......@@ -1605,12 +1640,12 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
mac[port].dyn_learn = true;
mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
case BR_STATE_FORWARDING:
mac[port].ingress = true;
mac[port].egress = true;
mac[port].dyn_learn = true;
mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
......@@ -3239,6 +3274,169 @@ static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
/* Enable or disable hardware address learning on @port.
 *
 * Programs the MAC configuration entry and, on success, caches the state
 * in priv->learn_ena so the STP state callbacks can re-apply it.
 *
 * Returns 0 on success or a negative error code from the dynamic config
 * write.
 */
static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
				     bool enabled)
{
	struct sja1105_mac_config_entry *mac;
	int rc;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Program the requested state directly. Deriving dyn_learn from
	 * priv->learn_ena here would use the stale, not-yet-updated cache
	 * and make the hardware lag one call behind the request.
	 */
	mac[port].dyn_learn = enabled;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc)
		return rc;

	/* Only cache the new state once hardware accepted it */
	if (enabled)
		priv->learn_ena |= BIT(port);
	else
		priv->learn_ena &= ~BIT(port);

	return 0;
}
/* Common function for unicast and broadcast flood configuration.
 * Flooding is configured between each {ingress, egress} port pair, and since
 * the bridge's semantics are those of "egress flooding", it means we must
 * enable flooding towards this port from all ingress ports that are in the
 * same bridge. In practice, we just enable flooding from all possible ingress
 * ports regardless of whether they're in the same bridge or not, since the
 * reach_port configuration will not allow flooded frames to leak across
 * bridging domains anyway.
 */
static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
					  struct switchdev_brport_flags flags)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	int from, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (from = 0; from < priv->ds->num_ports; from++) {
		if (dsa_is_unused_port(priv->ds, from))
			continue;
		if (from == to)
			continue;

		/* Unicast: fl_domain is the flood domain of @from */
		if (flags.mask & BR_FLOOD) {
			if (flags.val & BR_FLOOD)
				l2_fwd[from].fl_domain |= BIT(to);
			else
				l2_fwd[from].fl_domain &= ~BIT(to);
		}
		/* Broadcast: bc_domain is the broadcast domain of @from */
		if (flags.mask & BR_BCAST_FLOOD) {
			if (flags.val & BR_BCAST_FLOOD)
				l2_fwd[from].bc_domain |= BIT(to);
			else
				l2_fwd[from].bc_domain &= ~BIT(to);
		}

		/* Commit the updated entry to hardware before moving on */
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  from, &l2_fwd[from], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}
/* Enable or disable flooding of unknown multicast towards @to by editing
 * the destination port mask of the catch-all multicast FDB entry that
 * sja1105_init_static_fdb() installed at the end of the L2 lookup table.
 *
 * Returns 0 on success, -ENOSPC if the catch-all entry is missing, or a
 * negative error from the dynamic config write.
 */
static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
				    struct switchdev_brport_flags flags,
				    struct netlink_ext_ack *extack)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
	l2_lookup = table->entries;

	/* Locate the catch-all entry for unknown multicast */
	for (match = 0; match < table->entry_count; match++)
		if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
		    l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
			break;

	if (match == table->entry_count) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not find FDB entry for unknown multicast");
		return -ENOSPC;
	}

	if (flags.val & BR_MCAST_FLOOD)
		l2_lookup[match].destports |= BIT(to);
	else
		l2_lookup[match].destports &= ~BIT(to);

	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup[match].index,
					    &l2_lookup[match],
					    true);
}
/* .port_pre_bridge_flags handler: validate that the requested bridge port
 * flags can be offloaded before any of them is applied.
 *
 * First-generation chips (can_limit_mcast_flood == false) share one knob
 * for unicast and multicast flooding, so a request that sets them to
 * different values is rejected up front.
 */
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
					 struct switchdev_brport_flags flags,
					 struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	bool unicast, multicast;

	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	/* Nothing further to check unless flooding is being changed on a
	 * chip with a shared unicast/multicast flood control.
	 */
	if (!(flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) ||
	    priv->info->can_limit_mcast_flood)
		return 0;

	unicast = !!(flags.val & BR_FLOOD);
	multicast = !!(flags.val & BR_MCAST_FLOOD);
	if (unicast != multicast) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This chip cannot configure multicast flooding independently of unicast");
		return -EINVAL;
	}

	return 0;
}
/* .port_bridge_flags handler: apply the bridge port properties selected in
 * flags.mask, taking the desired on/off state from flags.val. Flags were
 * already validated by sja1105_port_pre_bridge_flags().
 */
static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		rc = sja1105_port_set_learning(priv, port, learn_ena);
		if (rc)
			return rc;
	}

	/* Unicast and broadcast flooding share the L2 forwarding table */
	if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
		rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
		if (rc)
			return rc;
	}

	/* For chips that can't offload BR_MCAST_FLOOD independently, there
	 * is nothing to do here, we ensured the configuration is in sync by
	 * offloading BR_FLOOD.
	 */
	if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
		rc = sja1105_port_mcast_flood(priv, port, flags,
					      extack);
		if (rc)
			return rc;
	}

	return 0;
}
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
......@@ -3262,6 +3460,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_fdb_del = sja1105_fdb_del,
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
.port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
.port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_vlan_add,
......
......@@ -512,6 +512,7 @@ const struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
......@@ -529,6 +530,7 @@ const struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
......@@ -546,6 +548,7 @@ const struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
......@@ -564,6 +567,7 @@ const struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
......@@ -582,6 +586,7 @@ const struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
......@@ -601,6 +606,7 @@ const struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
......
......@@ -581,7 +581,7 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct net_device *dev,
unsigned long flags)
struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
int err;
......@@ -590,15 +590,20 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
if (!br_port)
return 0;
err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
if (err)
return err;
if (flags.mask & BR_FLOOD) {
err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
if (err)
return err;
}
err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
if (err)
return err;
if (flags.mask & BR_LEARNING) {
err = prestera_hw_port_learning_set(port,
flags.val & BR_LEARNING);
if (err)
return err;
}
memcpy(&br_port->flags, &flags, sizeof(flags));
memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
......@@ -695,7 +700,8 @@ static int prestera_port_attr_stp_state_set(struct prestera_port *port,
}
static int prestera_port_obj_attr_set(struct net_device *dev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
......@@ -706,7 +712,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags &
if (attr->u.brport_flags.mask &
~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
err = -EINVAL;
break;
......
......@@ -653,11 +653,11 @@ mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
*mlxsw_sp_port,
unsigned long brport_flags)
static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_brport_flags flags)
{
if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
......@@ -665,7 +665,7 @@ static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
unsigned long brport_flags)
struct switchdev_brport_flags flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
......@@ -675,29 +675,37 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_UC,
brport_flags & BR_FLOOD);
if (err)
return err;
if (flags.mask & BR_FLOOD) {
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
bridge_port,
MLXSW_SP_FLOOD_TYPE_UC,
flags.val & BR_FLOOD);
if (err)
return err;
}
err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
brport_flags & BR_LEARNING);
if (err)
return err;
if (flags.mask & BR_LEARNING) {
err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
bridge_port,
flags.val & BR_LEARNING);
if (err)
return err;
}
if (bridge_port->bridge_device->multicast_enabled)
goto out;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
brport_flags &
BR_MCAST_FLOOD);
if (err)
return err;
if (flags.mask & BR_MCAST_FLOOD) {
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
flags.val & BR_MCAST_FLOOD);
if (err)
return err;
}
out:
memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
......@@ -887,7 +895,8 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
......
......@@ -1038,6 +1038,7 @@ EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 port_cfg;
if (!(BIT(port) & ocelot->bridge_mask))
......@@ -1050,7 +1051,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot->bridge_fwd_mask |= BIT(port);
fallthrough;
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
if (ocelot_port->learn_ena)
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
default:
......@@ -1534,6 +1536,86 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_get_max_mtu);
/* Enable or disable hardware address learning on @port and cache the
 * setting so the STP state callbacks can honor it later.
 */
static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
				     bool enabled)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	/* Touch only the LEARN_ENA bit of the per-port config register */
	ocelot_rmw_gix(ocelot, enabled ? ANA_PORT_PORT_CFG_LEARN_ENA : 0,
		       ANA_PORT_PORT_CFG_LEARN_ENA,
		       ANA_PORT_PORT_CFG, port);

	ocelot_port->learn_ena = enabled;
}
/* Add or remove @port from the unknown unicast flood port group (PGID_UC) */
static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 mask = BIT(port);

	ocelot_rmw_rix(ocelot, enabled ? mask : 0, mask, ANA_PGID_PGID,
		       PGID_UC);
}
/* Add or remove @port from the unknown multicast flood port group (PGID_MC) */
static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 mask = BIT(port);

	ocelot_rmw_rix(ocelot, enabled ? mask : 0, mask, ANA_PGID_PGID,
		       PGID_MC);
}
/* Add or remove @port from the broadcast flood port group (PGID_BC) */
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 mask = BIT(port);

	ocelot_rmw_rix(ocelot, enabled ? mask : 0, mask, ANA_PGID_PGID,
		       PGID_BC);
}
/* Validate a bridge port flags offload request: anything outside the set
 * of properties this hardware can control must be rejected so the bridge
 * falls back to software behavior.
 */
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
				 struct switchdev_brport_flags flags)
{
	unsigned long supported = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				  BR_BCAST_FLOOD;

	return (flags.mask & ~supported) ? -EINVAL : 0;
}
EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
/* Apply the bridge port properties selected in flags.mask, taking the
 * desired on/off state for each from flags.val.
 */
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
			      struct switchdev_brport_flags flags)
{
	unsigned long mask = flags.mask;
	unsigned long val = flags.val;

	if (mask & BR_LEARNING)
		ocelot_port_set_learning(ocelot, port, !!(val & BR_LEARNING));

	if (mask & BR_FLOOD)
		ocelot_port_set_ucast_flood(ocelot, port, !!(val & BR_FLOOD));

	if (mask & BR_MCAST_FLOOD)
		ocelot_port_set_mcast_flood(ocelot, port,
					    !!(val & BR_MCAST_FLOOD));

	if (mask & BR_BCAST_FLOOD)
		ocelot_port_set_bcast_flood(ocelot, port,
					    !!(val & BR_BCAST_FLOOD));
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
......@@ -1583,6 +1665,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
REW_PORT_VLAN_CFG_PORT_TPID_M,
REW_PORT_VLAN_CFG, port);
/* Disable source address learning for standalone mode */
ocelot_port_set_learning(ocelot, port, false);
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
......@@ -1716,7 +1801,7 @@ int ocelot_init(struct ocelot *ocelot)
/* Setup flooding PGIDs */
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
......@@ -1737,15 +1822,18 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
/* Allow broadcast MAC frames. */
for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
ocelot_write_rix(ocelot,
ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
ANA_PGID_PGID, PGID_MC);
/* Allow broadcast and unknown L2 multicast to the CPU. */
ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID, PGID_BC);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
......
......@@ -1005,7 +1005,8 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
}
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
......@@ -1025,6 +1026,13 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = ocelot_port_pre_bridge_flags(ocelot, port,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags);
break;
default:
err = -EOPNOTSUPP;
break;
......@@ -1110,6 +1118,40 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
/* Join @port to @bridge and switch the port to bridged defaults: learning
 * and all flooding types enabled (val == mask), matching what the software
 * bridge assumes for a newly added port.
 */
static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port,
					struct net_device *bridge)
{
	struct switchdev_brport_flags flags;
	int err;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask;

	err = ocelot_port_bridge_join(ocelot, port, bridge);
	if (err)
		return err;

	/* Only reconfigure the port once the join itself succeeded */
	ocelot_port_bridge_flags(ocelot, port, flags);

	return 0;
}
/* Remove @port from @bridge and restore the standalone defaults: flooding
 * stays enabled, but address learning is turned back off, the same state
 * ocelot_init_port() sets up.
 */
static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port,
					 struct net_device *bridge)
{
	struct switchdev_brport_flags flags;
	int err;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	flags.val = flags.mask & ~BR_LEARNING;

	err = ocelot_port_bridge_leave(ocelot, port, bridge);

	/* Restore standalone defaults even if the leave reported an error,
	 * then propagate that error to the caller.
	 */
	ocelot_port_bridge_flags(ocelot, port, flags);

	return err;
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
......@@ -1121,11 +1163,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
err = ocelot_port_bridge_join(ocelot, port,
info->upper_dev);
err = ocelot_netdevice_bridge_join(ocelot, port,
info->upper_dev);
} else {
err = ocelot_port_bridge_leave(ocelot, port,
info->upper_dev);
err = ocelot_netdevice_bridge_leave(ocelot, port,
info->upper_dev);
}
}
if (netif_is_lag_master(info->upper_dev)) {
......
......@@ -1576,7 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
unsigned long brport_flags)
struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
......@@ -1590,7 +1590,7 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
if (err)
return err;
if (brport_flags & ~brport_flags_s)
if (flags.mask & ~brport_flags_s)
return -EINVAL;
return 0;
......@@ -1598,14 +1598,14 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
unsigned long brport_flags)
struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
return wops->port_attr_bridge_flags_set(rocker_port, brport_flags);
return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}
static int
......@@ -2058,7 +2058,7 @@ static int rocker_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
attr->u.brport_flags);
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
......
......@@ -55,33 +55,38 @@ static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
struct net_device *orig_dev,
unsigned long brport_flags)
struct switchdev_brport_flags flags)
{
struct am65_cpsw_common *cpsw = port->common;
bool unreg_mcast_add = false;
if (brport_flags & BR_MCAST_FLOOD)
unreg_mcast_add = true;
netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
unreg_mcast_add, port->port_id);
if (flags.mask & BR_MCAST_FLOOD) {
bool unreg_mcast_add = false;
cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
unreg_mcast_add);
if (flags.val & BR_MCAST_FLOOD)
unreg_mcast_add = true;
netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
unreg_mcast_add, port->port_id);
cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
unreg_mcast_add);
}
return 0;
}
static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
unsigned long flags)
struct switchdev_brport_flags flags)
{
if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int am65_cpsw_port_attr_set(struct net_device *ndev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
......
......@@ -57,33 +57,38 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
struct net_device *orig_dev,
unsigned long brport_flags)
struct switchdev_brport_flags flags)
{
struct cpsw_common *cpsw = priv->cpsw;
bool unreg_mcast_add = false;
if (brport_flags & BR_MCAST_FLOOD)
unreg_mcast_add = true;
dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
unreg_mcast_add, priv->emac_port);
if (flags.mask & BR_MCAST_FLOOD) {
bool unreg_mcast_add = false;
cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
unreg_mcast_add);
if (flags.val & BR_MCAST_FLOOD)
unreg_mcast_add = true;
dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
unreg_mcast_add, priv->emac_port);
cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
unreg_mcast_add);
}
return 0;
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
unsigned long flags)
struct switchdev_brport_flags flags)
{
if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int cpsw_port_attr_set(struct net_device *ndev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
......
......@@ -908,31 +908,39 @@ static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
return dpaa2_switch_port_set_stp_state(port_priv, state);
}
static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
unsigned long flags)
static int
dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
struct switchdev_brport_flags flags)
{
if (flags & ~(BR_LEARNING | BR_FLOOD))
if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
return -EINVAL;
return 0;
}
static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
unsigned long flags)
static int
dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
struct switchdev_brport_flags flags)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err = 0;
/* Learning is enabled per switch */
err = dpaa2_switch_set_learning(port_priv->ethsw_data,
!!(flags & BR_LEARNING));
if (err)
goto exit;
if (flags.mask & BR_LEARNING) {
/* Learning is enabled per switch */
err = dpaa2_switch_set_learning(port_priv->ethsw_data,
!!(flags.val & BR_LEARNING));
if (err)
return err;
}
err = dpaa2_switch_port_set_flood(port_priv, !!(flags & BR_FLOOD));
if (flags.mask & BR_FLOOD) {
err = dpaa2_switch_port_set_flood(port_priv,
!!(flags.val & BR_FLOOD));
if (err)
return err;
}
exit:
return err;
return 0;
}
static int dpaa2_switch_port_attr_set(struct net_device *netdev,
......
......@@ -626,8 +626,14 @@ struct dsa_switch_ops {
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
void (*port_fast_age)(struct dsa_switch *ds, int port);
int (*port_egress_floods)(struct dsa_switch *ds, int port,
bool unicast, bool multicast);
int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
int (*port_bridge_flags)(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter,
struct netlink_ext_ack *extack);
/*
* VLAN support
......
......@@ -32,6 +32,11 @@ enum switchdev_attr_id {
#endif
};
struct switchdev_brport_flags {
unsigned long val;
unsigned long mask;
};
struct switchdev_attr {
struct net_device *orig_dev;
enum switchdev_attr_id id;
......@@ -40,7 +45,7 @@ struct switchdev_attr {
void (*complete)(struct net_device *dev, int err, void *priv);
union {
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_{PRE}_BRIDGE_FLAGS */
struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */
bool mrouter; /* PORT_MROUTER */
clock_t ageing_time; /* BRIDGE_AGEING_TIME */
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
......@@ -281,7 +286,8 @@ int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
int (*set_cb)(struct net_device *dev,
const struct switchdev_attr *attr));
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack));
#else
static inline void switchdev_deferred_process(void)
......@@ -372,7 +378,8 @@ switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
int (*set_cb)(struct net_device *dev,
const struct switchdev_attr *attr))
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
return 0;
}
......
......@@ -54,16 +54,17 @@
* PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses
* of the switch port net devices, towards the CPU port module.
* PGID_UC: the flooding destinations for unknown unicast traffic.
* PGID_MC: the flooding destinations for broadcast and non-IP multicast
* traffic.
* PGID_MC: the flooding destinations for non-IP multicast traffic.
* PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic.
* PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic.
* PGID_BC: the flooding destinations for broadcast traffic.
*/
#define PGID_CPU 59
#define PGID_UC 60
#define PGID_MC 61
#define PGID_MCIPV4 62
#define PGID_MCIPV6 63
#define PGID_CPU 58
#define PGID_UC 59
#define PGID_MC 60
#define PGID_MCIPV4 61
#define PGID_MCIPV6 62
#define PGID_BC 63
#define for_each_unicast_dest_pgid(ocelot, pgid) \
for ((pgid) = 0; \
......@@ -611,6 +612,7 @@ struct ocelot_port {
u8 *xmit_template;
bool is_dsa_8021q_cpu;
bool learn_ena;
struct net_device *bond;
bool lag_tx_active;
......@@ -765,6 +767,10 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot);
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags val);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags val);
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
struct net_device *bridge);
int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
......
......@@ -853,87 +853,59 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state)
}
/* Set/clear or port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
int attrtype, unsigned long mask)
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
int attrtype, unsigned long mask)
{
unsigned long flags;
int err;
if (!tb[attrtype])
return 0;
return;
if (nla_get_u8(tb[attrtype]))
flags = p->flags | mask;
p->flags |= mask;
else
flags = p->flags & ~mask;
err = br_switchdev_set_port_flag(p, flags, mask);
if (err)
return err;
p->flags = flags;
return 0;
p->flags &= ~mask;
}
/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
unsigned long old_flags = p->flags;
bool br_vlan_tunnel_old = false;
unsigned long old_flags, changed_mask;
bool br_vlan_tunnel_old;
int err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
if (err)
return err;
br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
if (err)
old_flags = p->flags;
br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;
br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
BR_MULTICAST_FAST_LEAVE);
br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
BR_MULTICAST_TO_UNICAST);
br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
changed_mask = old_flags ^ p->flags;
err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
if (err) {
p->flags = old_flags;
return err;
}
if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
nbp_vlan_tunnel_info_flush(p);
br_port_flags_change(p, changed_mask);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
if (err)
......@@ -982,15 +954,6 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
p->group_fwd_mask = fwd_mask;
}
err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
BR_NEIGH_SUPPRESS);
if (err)
return err;
err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
if (err)
return err;
if (tb[IFLA_BRPORT_BACKUP_PORT]) {
struct net_device *backup_dev = NULL;
u32 backup_ifindex;
......@@ -1008,7 +971,6 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
return err;
}
br_port_flags_change(p, old_flags ^ p->flags);
return 0;
}
......@@ -1046,7 +1008,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
return err;
spin_lock_bh(&p->br->lock);
err = br_setport(p, tb);
err = br_setport(p, tb, extack);
spin_unlock_bh(&p->br->lock);
} else {
/* Binary compatibility with old RSTP */
......@@ -1141,7 +1103,7 @@ static int br_port_slave_changelink(struct net_device *brdev,
return 0;
spin_lock_bh(&br->lock);
ret = br_setport(br_port_get_rtnl(dev), data);
ret = br_setport(br_port_get_rtnl(dev), data, extack);
spin_unlock_bh(&br->lock);
return ret;
......
......@@ -1575,7 +1575,8 @@ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
const struct sk_buff *skb);
int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
unsigned long mask);
unsigned long mask,
struct netlink_ext_ack *extack);
void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
int type);
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
......@@ -1605,7 +1606,8 @@ static inline bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
unsigned long mask)
unsigned long mask,
struct netlink_ext_ack *extack)
{
return 0;
}
......
......@@ -60,42 +60,45 @@ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long flags,
unsigned long mask)
unsigned long mask,
struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
.u.brport_flags = mask,
};
struct switchdev_notifier_port_attr_info info = {
.attr = &attr,
};
int err;
if (mask & ~BR_PORT_FLAGS_HW_OFFLOAD)
mask &= BR_PORT_FLAGS_HW_OFFLOAD;
if (!mask)
return 0;
attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
attr.u.brport_flags.val = flags;
attr.u.brport_flags.mask = mask;
/* We run from atomic context here */
err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
&info.info, NULL);
&info.info, extack);
err = notifier_to_errno(err);
if (err == -EOPNOTSUPP)
return 0;
if (err) {
br_warn(p->br, "bridge flag offload is not supported %u(%s)\n",
(unsigned int)p->port_no, p->dev->name);
if (extack && !extack->_msg)
NL_SET_ERR_MSG_MOD(extack,
"bridge flag offload is not supported");
return -EOPNOTSUPP;
}
attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
attr.flags = SWITCHDEV_F_DEFER;
attr.u.brport_flags = flags;
err = switchdev_port_attr_set(p->dev, &attr);
if (err) {
br_warn(p->br, "error setting offload flag on port %u(%s)\n",
(unsigned int)p->port_no, p->dev->name);
NL_SET_ERR_MSG_MOD(extack, "error setting offload flag on port");
return err;
}
......
......@@ -59,6 +59,7 @@ static BRPORT_ATTR(_name, 0644, \
static int store_flag(struct net_bridge_port *p, unsigned long v,
unsigned long mask)
{
struct netlink_ext_ack extack = {0};
unsigned long flags = p->flags;
int err;
......@@ -68,9 +69,11 @@ static int store_flag(struct net_bridge_port *p, unsigned long v,
flags &= ~mask;
if (flags != p->flags) {
err = br_switchdev_set_port_flag(p, flags, mask);
if (err)
err = br_switchdev_set_port_flag(p, flags, mask, &extack);
if (err) {
netdev_err(p->dev, "%s\n", extack._msg);
return err;
}
p->flags = flags;
br_port_flags_change(p, mask);
......
......@@ -183,9 +183,14 @@ int dsa_port_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags);
int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags);
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack);
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
int dsa_port_vlan_del(struct dsa_port *dp,
......
......@@ -122,6 +122,28 @@ void dsa_port_disable(struct dsa_port *dp)
rtnl_unlock();
}
/* Program the standard set of bridge port flags (learning plus
 * unicast/multicast/broadcast flooding) on @dp, either for bridged
 * operation (@bridge_offload == true: everything enabled) or for
 * standalone operation (flood everything, but no address learning).
 *
 * Each flag is notified to the driver individually so that a driver
 * which cannot offload one particular flag still gets the chance to
 * apply the others; per-flag errors are deliberately ignored.
 */
static void dsa_port_change_brport_flags(struct dsa_port *dp,
					 bool bridge_offload)
{
	struct switchdev_brport_flags flags = {
		.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			BR_BCAST_FLOOD,
	};
	int flag;

	flags.val = flags.mask;
	if (!bridge_offload)
		flags.val &= ~BR_LEARNING;

	for_each_set_bit(flag, &flags.mask, 32) {
		struct switchdev_brport_flags single = {
			.val = flags.val & BIT(flag),
			.mask = BIT(flag),
		};

		dsa_port_bridge_flags(dp, single, NULL);
	}
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
......@@ -132,10 +154,10 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
};
int err;
/* Set the flooding mode before joining the port in the switch */
err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD);
if (err)
return err;
/* Notify the port driver to set its configurable flags in a way that
* matches the initial settings of a bridge port.
*/
dsa_port_change_brport_flags(dp, true);
/* Here the interface is already bridged. Reflect the current
* configuration so that drivers can program their chips accordingly.
......@@ -146,7 +168,7 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
/* The bridging is rolled back on error */
if (err) {
dsa_port_bridge_flags(dp, 0);
dsa_port_change_brport_flags(dp, false);
dp->bridge_dev = NULL;
}
......@@ -172,8 +194,18 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
if (err)
pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
/* Port is leaving the bridge, disable flooding */
dsa_port_bridge_flags(dp, 0);
/* Configure the port for standalone mode (no address learning,
* flood everything).
* The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
* when the user requests it through netlink or sysfs, but not
* automatically at port join or leave, so we need to handle resetting
* the brport flags ourselves. But we even prefer it that way, because
* otherwise, some setups might never get the notification they need,
* for example, when a port leaves a LAG that offloads the bridge,
* it becomes standalone, but as far as the bridge is concerned, no
* port ever left.
*/
dsa_port_change_brport_flags(dp, false);
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
......@@ -392,39 +424,39 @@ int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
return 0;
}
int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags)
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->port_egress_floods ||
(flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
if (!ds->ops->port_pre_bridge_flags)
return -EINVAL;
return 0;
return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}
int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags)
int dsa_port_bridge_flags(const struct dsa_port *dp,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err = 0;
if (ds->ops->port_egress_floods)
err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
flags & BR_MCAST_FLOOD);
if (!ds->ops->port_bridge_flags)
return -EINVAL;
return err;
return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
}
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter)
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (!ds->ops->port_egress_floods)
if (!ds->ops->port_set_mrouter)
return -EOPNOTSUPP;
return ds->ops->port_egress_floods(ds, port, true, mrouter);
return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
}
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
......
......@@ -272,7 +272,8 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int dsa_slave_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr)
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
int ret;
......@@ -291,13 +292,14 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags);
ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
extack);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
ret = dsa_port_bridge_flags(dp, attr->u.brport_flags);
ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter);
ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
break;
default:
ret = -EOPNOTSUPP;
......
......@@ -488,14 +488,18 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
int (*set_cb)(struct net_device *dev,
const struct switchdev_attr *attr))
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
struct netlink_ext_ack *extack;
struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
if (check_cb(dev)) {
err = set_cb(dev, port_attr_info->attr);
err = set_cb(dev, port_attr_info->attr, extack);
if (err != -EOPNOTSUPP)
port_attr_info->handled = true;
return err;
......@@ -525,7 +529,8 @@ int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
int (*set_cb)(struct net_device *dev,
const struct switchdev_attr *attr))
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack))
{
int err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment