Commit 879c610c authored by Jakub Kicinski

Merge branch 'dsa-changes-for-multiple-cpu-ports-part-1'

Vladimir Oltean says:

====================
DSA changes for multiple CPU ports (part 1)

I am trying to enable the second internal port pair from the NXP LS1028A
Felix switch for DSA-tagged traffic via "ocelot-8021q". This series
represents part 1 (of an unknown number) of that effort.

It does some preparation work, like managing host flooding in DSA via a
dedicated method, and removing the CPU port as argument from the tagging
protocol change procedure.

In terms of driver-specific changes, it reworks the two tag protocol
implementations in the Felix driver to have a structured data format.
It enables host flooding towards all tag_8021q CPU ports. It dynamically
updates the tag_8021q CPU port used for traps. It also fixes a bug
introduced by a previous refactoring/oversimplification commit in
net-next.
====================

Link: https://lore.kernel.org/r/20220511095020.562461-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f0a65f81 7a29d220
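
The two dsa_switch_ops changes at the core of this series, as they appear in
the include/net/dsa.h hunk below: .change_tag_protocol loses its CPU port
argument and becomes a per-switch operation, and host flooding gets a
dedicated method instead of being piggybacked on bridge port flags:

	int	(*change_tag_protocol)(struct dsa_switch *ds,
				       enum dsa_tag_protocol proto);
	void	(*port_set_host_flood)(struct dsa_switch *ds, int port,
				       bool uc, bool mc);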
drivers/net/dsa/mv88e6xxx/chip.c:
@@ -6329,11 +6329,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
 	return chip->tag_protocol;
 }
 
-static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
 					 enum dsa_tag_protocol proto)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	enum dsa_tag_protocol old_protocol;
+	struct dsa_port *cpu_dp;
 	int err;
 
 	switch (proto) {
@@ -6358,11 +6359,24 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
 	chip->tag_protocol = proto;
 
 	mv88e6xxx_reg_lock(chip);
-	err = mv88e6xxx_setup_port_mode(chip, port);
+	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+		err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+		if (err) {
+			mv88e6xxx_reg_unlock(chip);
+			goto unwind;
+		}
+	}
 	mv88e6xxx_reg_unlock(chip);
 
-	if (err)
-		chip->tag_protocol = old_protocol;
+	return 0;
+
+unwind:
+	chip->tag_protocol = old_protocol;
+
+	mv88e6xxx_reg_lock(chip);
+	dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
+		mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+	mv88e6xxx_reg_unlock(chip);
 
 	return err;
 }
...
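
With the port argument gone, .change_tag_protocol is invoked once per switch
and the driver walks its own CPU ports, unwinding on failure, as mv88e6xxx
does above. A minimal sketch of the pattern, where struct foo_priv and
foo_set_port_proto() are hypothetical stand-ins for a driver's private data
and per-port programming routine:

	static int foo_change_tag_protocol(struct dsa_switch *ds,
					   enum dsa_tag_protocol proto)
	{
		struct foo_priv *priv = ds->priv;
		enum dsa_tag_protocol old_proto = priv->tag_proto;
		struct dsa_port *cpu_dp;
		int err;

		/* Apply the new protocol to every CPU port */
		dsa_switch_for_each_cpu_port(cpu_dp, ds) {
			err = foo_set_port_proto(priv, cpu_dp->index, proto);
			if (err)
				goto unwind;
		}

		priv->tag_proto = proto;

		return 0;

	unwind:
		/* Restore the old protocol on the CPU ports already changed */
		dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
			foo_set_port_proto(priv, cpu_dp->index, old_proto);

		return err;
	}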
This diff is collapsed. (drivers/net/dsa/ocelot/felix.c)
drivers/net/dsa/ocelot/felix.h:
@@ -59,6 +59,19 @@ struct felix_info {
 			       struct resource *res);
 };
 
+/* Methods for initializing the hardware resources specific to a tagging
+ * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
+ * for "ocelot-8021q").
+ * It is important that the resources configured here do not have side effects
+ * for the other tagging protocols. If that is the case, their configuration
+ * needs to go to felix_tag_proto_setup_shared().
+ */
+struct felix_tag_proto_ops {
+	int (*setup)(struct dsa_switch *ds);
+	void (*teardown)(struct dsa_switch *ds);
+	unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+};
+
 extern const struct dsa_switch_ops felix_switch_ops;
 
 /* DSA glue / front-end for struct ocelot */
@@ -71,7 +84,10 @@ struct felix {
 	resource_size_t			switch_base;
 	resource_size_t			imdio_base;
 	enum dsa_tag_protocol		tag_proto;
+	const struct felix_tag_proto_ops *tag_proto_ops;
 	struct kthread_worker		*xmit_worker;
+	unsigned long			host_flood_uc_mask;
+	unsigned long			host_flood_mc_mask;
 };
 
 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
...
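
The felix.c diff consuming this ops structure is collapsed above. Assuming a
protocol change now reduces to tearing down the old protocol's resources and
setting up the new one's, the dispatch plausibly looks like the sketch below;
felix_get_tag_proto_ops() is a hypothetical lookup from protocol constant to
ops table, not necessarily the actual felix.c code:

	static int felix_change_tag_protocol(struct dsa_switch *ds,
					     enum dsa_tag_protocol proto)
	{
		struct ocelot *ocelot = ds->priv;
		struct felix *felix = ocelot_to_felix(ocelot);
		const struct felix_tag_proto_ops *old_ops = felix->tag_proto_ops;
		const struct felix_tag_proto_ops *new_ops;
		int err;

		new_ops = felix_get_tag_proto_ops(proto);
		if (!new_ops)
			return -EPROTONOSUPPORT;

		old_ops->teardown(ds);

		err = new_ops->setup(ds);
		if (err) {
			/* Best-effort restore of the old protocol */
			old_ops->setup(ds);
			return err;
		}

		felix->tag_proto_ops = new_ops;
		felix->tag_proto = proto;

		return 0;
	}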
drivers/net/dsa/realtek/rtl8365mb.c:
@@ -1778,7 +1778,7 @@ static int rtl8365mb_cpu_config(struct realtek_priv *priv)
 	return 0;
 }
 
-static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index,
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
 					 enum dsa_tag_protocol proto)
 {
 	struct realtek_priv *priv = ds->priv;
...
drivers/net/ethernet/mscc/ocelot.c:
@@ -1349,15 +1349,10 @@ EXPORT_SYMBOL(ocelot_drain_cpu_queue);
 int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
 		   u16 vid, const struct net_device *bridge)
 {
-	int pgid = port;
-
-	if (port == ocelot->npi)
-		pgid = PGID_CPU;
-
 	if (!vid)
 		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
 
-	return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
+	return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
 }
 EXPORT_SYMBOL(ocelot_fdb_add);
@@ -2344,9 +2339,6 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
 	struct ocelot_pgid *pgid;
 	u16 vid = mdb->vid;
 
-	if (port == ocelot->npi)
-		port = ocelot->num_phys_ports;
-
 	if (!vid)
 		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
 
@@ -2404,9 +2396,6 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
 	struct ocelot_pgid *pgid;
 	u16 vid = mdb->vid;
 
-	if (port == ocelot->npi)
-		port = ocelot->num_phys_ports;
-
 	if (!vid)
 		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
 
@@ -2954,9 +2943,6 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
 void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
 			      struct switchdev_brport_flags flags)
 {
-	if (port == ocelot->npi)
-		port = ocelot->num_phys_ports;
-
 	if (flags.mask & BR_LEARNING)
 		ocelot_port_set_learning(ocelot, port,
 					 !!(flags.val & BR_LEARNING));
...
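
The removed checks translated the NPI port number inside the library; that
responsibility presumably moves to the felix DSA front-end, which knows which
port faces the host. A hedged sketch of the kind of translation the caller
now performs for host FDB entries (illustrative only; the actual felix.c diff
is collapsed above):

	static int felix_fdb_add(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
	{
		struct ocelot *ocelot = ds->priv;

		/* Host addresses are learned towards the CPU port module
		 * rather than a front-panel port (assumption: NPI-based
		 * "ocelot"/"seville" tagging).
		 */
		if (dsa_is_cpu_port(ds, port))
			port = PGID_CPU;

		return ocelot_fdb_add(ocelot, port, addr, vid, db.bridge.dev);
	}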
include/net/dsa.h:
@@ -579,6 +579,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
 	dsa_switch_for_each_port((_dp), (_ds)) \
 		if (dsa_port_is_cpu((_dp)))
 
+#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
+	dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
+		if (dsa_port_is_cpu((_dp)))
+
 static inline u32 dsa_user_ports(struct dsa_switch *ds)
 {
 	struct dsa_port *dp;
@@ -590,6 +594,17 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
 	return mask;
 }
 
+static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
+{
+	struct dsa_port *cpu_dp;
+	u32 mask = 0;
+
+	dsa_switch_for_each_cpu_port(cpu_dp, ds)
+		mask |= BIT(cpu_dp->index);
+
+	return mask;
+}
+
 /* Return the local port used to reach an arbitrary switch device */
 static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
 {
@@ -792,7 +807,7 @@ struct dsa_switch_ops {
 	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
 						  int port,
 						  enum dsa_tag_protocol mprot);
-	int	(*change_tag_protocol)(struct dsa_switch *ds, int port,
+	int	(*change_tag_protocol)(struct dsa_switch *ds,
 				       enum dsa_tag_protocol proto);
 	/*
 	 * Method for switch drivers to connect to the tagging protocol driver
@@ -967,6 +982,8 @@ struct dsa_switch_ops {
 	int	(*port_bridge_flags)(struct dsa_switch *ds, int port,
 				     struct switchdev_brport_flags flags,
 				     struct netlink_ext_ack *extack);
+	void	(*port_set_host_flood)(struct dsa_switch *ds, int port,
+				       bool uc, bool mc);
 	/*
 	 * VLAN support
...
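
A short usage example for the new dsa_cpu_ports() helper: a driver computing
the mask of ports that host-directed traffic must be forwarded to.
foo_set_host_fwd_mask() is a hypothetical driver routine:

	static void foo_update_host_fwd_mask(struct dsa_switch *ds)
	{
		u32 mask = dsa_cpu_ports(ds);	/* BIT(p) for each CPU port p */

		foo_set_host_fwd_mask(ds->priv, mask);
	}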
net/dsa/dsa2.c:
@@ -809,22 +809,18 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
 {
 	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
 	struct dsa_switch_tree *dst = ds->dst;
-	struct dsa_port *cpu_dp;
 	int err;
 
 	if (tag_ops->proto == dst->default_proto)
 		goto connect;
 
-	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
-		rtnl_lock();
-		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
-						   tag_ops->proto);
-		rtnl_unlock();
-		if (err) {
-			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
-				tag_ops->name, ERR_PTR(err));
-			return err;
-		}
+	rtnl_lock();
+	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
+	rtnl_unlock();
+	if (err) {
+		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
+			tag_ops->name, ERR_PTR(err));
+		return err;
 	}
 
 connect:
...
net/dsa/dsa_priv.h:
@@ -291,6 +291,7 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
+void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);
 
 /* slave.c */
 extern const struct dsa_device_ops notag_netdev_ops;
...
net/dsa/port.c:
@@ -920,6 +920,14 @@ int dsa_port_bridge_flags(struct dsa_port *dp,
 	return 0;
 }
 
+void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
+{
+	struct dsa_switch *ds = dp->ds;
+
+	if (ds->ops->port_set_host_flood)
+		ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
+}
+
 int dsa_port_vlan_msti(struct dsa_port *dp,
 		       const struct switchdev_vlan_msti *msti)
 {
...
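
On the driver side, felix (collapsed diff above) plausibly implements the new
op by tracking, in the host_flood_uc_mask/host_flood_mc_mask fields added to
struct felix, which user ports currently request host flooding, then
reprogramming the hardware. A sketch under that assumption, with
felix_apply_host_flood() as a hypothetical helper that commits the masks to
the PGID_UC/PGID_MC flood destinations:

	static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
					      bool uc, bool mc)
	{
		struct ocelot *ocelot = ds->priv;
		struct felix *felix = ocelot_to_felix(ocelot);

		if (uc)
			felix->host_flood_uc_mask |= BIT(port);
		else
			felix->host_flood_uc_mask &= ~BIT(port);

		if (mc)
			felix->host_flood_mc_mask |= BIT(port);
		else
			felix->host_flood_mc_mask &= ~BIT(port);

		/* Flood towards the CPU if any user port still wants it */
		felix_apply_host_flood(ocelot,
				       !!felix->host_flood_uc_mask,
				       !!felix->host_flood_mc_mask);
	}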
net/dsa/slave.c:
@@ -262,37 +262,13 @@ static int dsa_slave_close(struct net_device *dev)
 	return 0;
 }
 
-/* Keep flooding enabled towards this port's CPU port as long as it serves at
- * least one port in the tree that requires it.
- */
-static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
+static void dsa_slave_manage_host_flood(struct net_device *dev)
 {
-	struct switchdev_brport_flags flags = {
-		.mask = BR_FLOOD | BR_MCAST_FLOOD,
-	};
-	struct dsa_switch_tree *dst = dp->ds->dst;
-	struct dsa_port *cpu_dp = dp->cpu_dp;
-	struct dsa_port *other_dp;
-	int err;
-
-	list_for_each_entry(other_dp, &dst->ports, list) {
-		if (!dsa_port_is_user(other_dp))
-			continue;
-
-		if (other_dp->cpu_dp != cpu_dp)
-			continue;
-
-		if (other_dp->slave->flags & IFF_ALLMULTI)
-			flags.val |= BR_MCAST_FLOOD;
-		if (other_dp->slave->flags & IFF_PROMISC)
-			flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
-	}
-
-	err = dsa_port_pre_bridge_flags(dp, flags, NULL);
-	if (err)
-		return;
-
-	dsa_port_bridge_flags(cpu_dp, flags, NULL);
+	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+	bool uc = dev->flags & IFF_PROMISC;
+
+	dsa_port_set_host_flood(dp, uc, mc);
 }
 
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
@@ -310,7 +286,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 
 	if (dsa_switch_supports_uc_filtering(ds) &&
 	    dsa_switch_supports_mc_filtering(ds))
-		dsa_port_manage_cpu_flood(dp);
+		dsa_slave_manage_host_flood(dev);
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)
...
net/dsa/switch.c:
@@ -809,14 +809,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
 
 	ASSERT_RTNL();
 
-	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
-		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
-						   tag_ops->proto);
-		if (err)
-			return err;
+	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
+	if (err)
+		return err;
 
+	dsa_switch_for_each_cpu_port(cpu_dp, ds)
 		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
-	}
 
 	/* Now that changing the tag protocol can no longer fail, let's update
 	 * the remaining bits which are "duplicated for faster access", and the
...
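
dsa_switch_change_tag_proto() is the runtime path, reached when the user
changes the tagging protocol through the "dsa/tagging" sysfs attribute of the
DSA master interface (eth0 below is an example master name), e.g.:

	echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging

The driver op now runs once per switch; only after it succeeds is the tagger
pointer propagated to each CPU port via dsa_port_set_tag_protocol().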