Commit 0fac6aa0 authored by Vladimir Oltean, committed by David S. Miller

net: dsa: sja1105: delete the best_effort_vlan_filtering mode

Simply put, the best-effort VLAN filtering mode relied on VLAN retagging
from a bridge VLAN towards a tag_8021q sub-VLAN so that the tagger could
decode the source port. However, the VLAN retagging implementation inside
the sja1105 chips is limited, and we were relying on marginal operating
conditions.

The most notable limitation of the best-effort VLAN filtering mode is
its inability to handle this case properly:

ip link add br0 type bridge vlan_filtering 1
ip link set swp2 master br0
ip link set swp4 master br0
bridge vlan del dev swp4 vid 1
bridge vlan add dev swp4 vid 1 pvid

When sending an untagged packet through swp2, the expectation is for it
to be forwarded to swp4 as egress-tagged (so it will contain VLAN ID 1
on egress). But the switch will send it as egress-untagged.
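One way to observe this, for instance, is to capture on swp4's link partner
while sending the untagged packet through swp2 (interface name purely
illustrative):

tcpdump -e -n -i eth0

With -e, tcpdump prints the link-level header: the forwarded frame shows up
with no 802.1Q tag at all, instead of carrying VLAN ID 1.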

There was an attempt to fix this here:
https://patchwork.kernel.org/project/netdevbpf/patch/20210407201452.1703261-2-olteanv@gmail.com/

but it failed miserably because it broke PTP RX timestamping, in a way
that cannot be corrected due to hardware issues related to VLAN
retagging.

So either PTP RX timestamping is broken, or pushing VLAN headers on
egress for untagged packets is broken; the sad reality is that the
best-effort VLAN filtering code is broken either way. Delete it.

Note that this means there will be a temporary loss of functionality in
this driver until it is replaced with something better (network stack
RX/TX capability for "mode 2" as described in
Documentation/networking/dsa/sja1105.rst, the "port under VLAN-aware
bridge" case). We simply cannot keep this code until that driver rework
is done, it is super bloated and tangled with tag_8021q.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c18e9405
@@ -234,19 +234,13 @@ struct sja1105_bridge_vlan {
bool untagged;
};
enum sja1105_vlan_state {
SJA1105_VLAN_UNAWARE,
SJA1105_VLAN_BEST_EFFORT,
SJA1105_VLAN_FILTERING_FULL,
};
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
bool best_effort_vlan_filtering;
bool vlan_aware;
unsigned long learn_ena;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
@@ -264,7 +258,6 @@ struct sja1105_private {
*/
struct mutex mgmt_lock;
struct dsa_8021q_context *dsa_8021q_ctx;
enum sja1105_vlan_state vlan_state;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1;
@@ -311,10 +304,6 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
void sja1105_devlink_teardown(struct dsa_switch *ds);
int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack);
@@ -115,105 +115,6 @@ static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
kfree(priv->regions);
}
static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
bool *be_vlan)
{
*be_vlan = priv->best_effort_vlan_filtering;
return 0;
}
static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
bool be_vlan)
{
struct dsa_switch *ds = priv->ds;
bool vlan_filtering;
int port;
int rc;
priv->best_effort_vlan_filtering = be_vlan;
rtnl_lock();
for (port = 0; port < ds->num_ports; port++) {
struct dsa_port *dp;
if (!dsa_is_user_port(ds, port))
continue;
dp = dsa_to_port(ds, port);
vlan_filtering = dsa_port_is_vlan_filtering(dp);
rc = sja1105_vlan_filtering(ds, port, vlan_filtering, NULL);
if (rc)
break;
}
rtnl_unlock();
return rc;
}
enum sja1105_devlink_param_id {
SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
};
int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct sja1105_private *priv = ds->priv;
int err;
switch (id) {
case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
err = sja1105_best_effort_vlan_filtering_get(priv,
&ctx->val.vbool);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct sja1105_private *priv = ds->priv;
int err;
switch (id) {
case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
err = sja1105_best_effort_vlan_filtering_set(priv,
ctx->val.vbool);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static const struct devlink_param sja1105_devlink_params[] = {
DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
"best_effort_vlan_filtering",
DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
};
static int sja1105_setup_devlink_params(struct dsa_switch *ds)
{
return dsa_devlink_params_register(ds, sja1105_devlink_params,
ARRAY_SIZE(sja1105_devlink_params));
}
static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
{
dsa_devlink_params_unregister(ds, sja1105_devlink_params,
ARRAY_SIZE(sja1105_devlink_params));
}
int sja1105_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -233,23 +134,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
int sja1105_devlink_setup(struct dsa_switch *ds)
{
int rc;
rc = sja1105_setup_devlink_params(ds);
if (rc)
return rc;
rc = sja1105_setup_devlink_regions(ds);
if (rc < 0) {
sja1105_teardown_devlink_params(ds);
return rc;
}
return 0;
return sja1105_setup_devlink_regions(ds);
}
void sja1105_devlink_teardown(struct dsa_switch *ds)
{
sja1105_teardown_devlink_params(ds);
sja1105_teardown_devlink_regions(ds);
}
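For context, the knob deleted above was exposed as a devlink runtime
parameter, and was toggled from user space with the devlink CLI along these
lines (the device handle shown is illustrative and depends on how the switch
is probed):

devlink dev param set spi/spi2.1 name best_effort_vlan_filtering value true cmode runtime
devlink dev param show spi/spi2.1 name best_effort_vlan_filtering

With the parameter unregistered, devlink dev param show simply no longer
lists it for this driver.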
(The diff for the next file is collapsed in this view and not shown here.)
@@ -496,14 +496,11 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
int rc;
if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
} else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
key->type != SJA1105_KEY_VLAN_AWARE_VL) {
} else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -595,14 +592,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
} else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
key->type != SJA1105_KEY_VLAN_AWARE_VL) {
} else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -35,8 +35,6 @@ struct dsa_8021q_context {
__be16 proto;
};
#define DSA_8021Q_N_SUBVLAN 8
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
@@ -50,21 +48,16 @@ int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id);
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);
int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
u16 dsa_8021q_rx_subvlan(u16 vid);
bool vid_is_dsa_8021q_rxvlan(u16 vid);
bool vid_is_dsa_8021q_txvlan(u16 vid);
@@ -59,7 +59,6 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)((skb)->cb))
struct sja1105_port {
u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
struct kthread_worker *xmit_worker;
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
@@ -17,7 +17,7 @@
*
* | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +-----------+-----+-----------------+-----------+-----------------------+
* | DIR | SVL | SWITCH_ID | SUBVLAN | PORT |
* | DIR | RSV | SWITCH_ID | RSV | PORT |
* +-----------+-----+-----------------+-----------+-----------------------+
*
* DIR - VID[11:10]:
@@ -27,24 +27,13 @@
* These values make the special VIDs of 0, 1 and 4095 to be left
* unused by this coding scheme.
*
* SVL/SUBVLAN - { VID[9], VID[5:4] }:
* Sub-VLAN encoding. Valid only when DIR indicates an RX VLAN.
* * 0 (0b000): Field does not encode a sub-VLAN, either because
* received traffic is untagged, PVID-tagged or because a second
* VLAN tag is present after this tag and not inside of it.
* * 1 (0b001): Received traffic is tagged with a VID value private
* to the host. This field encodes the index in the host's lookup
* table through which the value of the ingress VLAN ID can be
* recovered.
* * 2 (0b010): Field encodes a sub-VLAN.
* ...
* * 7 (0b111): Field encodes a sub-VLAN.
* When DIR indicates a TX VLAN, SUBVLAN must be transmitted as zero
* (by the host) and ignored on receive (by the switch).
*
* SWITCH_ID - VID[8:6]:
* Index of switch within DSA tree. Must be between 0 and 7.
*
* RSV - VID[5:4]:
* To be used for further expansion of PORT or for other purposes.
* Must be transmitted as zero and ignored on receive.
*
* PORT - VID[3:0]:
* Index of switch port. Must be between 0 and 15.
*/
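As a rough illustration of the simplified VID layout described in the comment
above, here is a minimal standalone C sketch. This is not the kernel code: the
masks are re-derived locally from the documented bit ranges, and the RX
direction code of 3 is an assumption carried over from tag_8021q.

#include <stdint.h>
#include <stdio.h>

/* Bit layout per the comment above: DIR = VID[11:10], SWITCH_ID = VID[8:6],
 * PORT = VID[3:0]; bits 9 and 5:4 are reserved.
 */
#define DIR_RX          (3u << 10)      /* assumed RX direction code */
#define SWITCH_ID_SHIFT 6
#define SWITCH_ID_MASK  (0x7u << SWITCH_ID_SHIFT)
#define PORT_MASK       0xfu

static uint16_t rx_vid(unsigned int switch_id, unsigned int port)
{
	return DIR_RX | ((switch_id << SWITCH_ID_SHIFT) & SWITCH_ID_MASK) |
	       (port & PORT_MASK);
}

int main(void)
{
	uint16_t vid = rx_vid(0, 2);	/* switch 0, user port 2 */

	/* Decoding mirrors dsa_8021q_rx_switch_id() / dsa_8021q_rx_source_port() */
	printf("vid %u -> switch %u, port %u\n", (unsigned int)vid,
	       (vid & SWITCH_ID_MASK) >> SWITCH_ID_SHIFT, vid & PORT_MASK);
	return 0;
}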
@@ -61,18 +50,6 @@
#define DSA_8021Q_SWITCH_ID(x) (((x) << DSA_8021Q_SWITCH_ID_SHIFT) & \
DSA_8021Q_SWITCH_ID_MASK)
#define DSA_8021Q_SUBVLAN_HI_SHIFT 9
#define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
#define DSA_8021Q_SUBVLAN_LO_SHIFT 4
#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
#define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
#define DSA_8021Q_SUBVLAN(x) \
(((DSA_8021Q_SUBVLAN_LO(x) << DSA_8021Q_SUBVLAN_LO_SHIFT) & \
DSA_8021Q_SUBVLAN_LO_MASK) | \
((DSA_8021Q_SUBVLAN_HI(x) << DSA_8021Q_SUBVLAN_HI_SHIFT) & \
DSA_8021Q_SUBVLAN_HI_MASK))
#define DSA_8021Q_PORT_SHIFT 0
#define DSA_8021Q_PORT_MASK GENMASK(3, 0)
#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
@@ -98,13 +75,6 @@ u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid);
u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
{
return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) |
DSA_8021Q_PORT(port) | DSA_8021Q_SUBVLAN(subvlan);
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid_subvlan);
/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
{
@@ -119,20 +89,6 @@ int dsa_8021q_rx_source_port(u16 vid)
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
/* Returns the decoded subvlan from the RX VID. */
u16 dsa_8021q_rx_subvlan(u16 vid)
{
u16 svl_hi, svl_lo;
svl_hi = (vid & DSA_8021Q_SUBVLAN_HI_MASK) >>
DSA_8021Q_SUBVLAN_HI_SHIFT;
svl_lo = (vid & DSA_8021Q_SUBVLAN_LO_MASK) >>
DSA_8021Q_SUBVLAN_LO_SHIFT;
return (svl_hi << 2) | svl_lo;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_subvlan);
bool vid_is_dsa_8021q_rxvlan(u16 vid)
{
return (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX;
@@ -227,7 +183,7 @@ static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
u16 tx_vid = dsa_8021q_tx_vid(ctx->ds, port);
struct net_device *master;
int i, err, subvlan;
int i, err;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
@@ -275,18 +231,11 @@ static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
return err;
}
/* Add to the master's RX filter not only @rx_vid, but in fact
* the entire subvlan range, just in case this DSA switch might
* want to use sub-VLANs.
*/
for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) {
u16 vid = dsa_8021q_rx_vid_subvlan(ctx->ds, port, subvlan);
/* Add @rx_vid to the master's RX filter. */
if (enabled)
vlan_vid_add(master, ctx->proto, vid);
vlan_vid_add(master, ctx->proto, rx_vid);
else
vlan_vid_del(master, ctx->proto, vid);
}
vlan_vid_del(master, ctx->proto, rx_vid);
/* Finally apply the TX VID on this port and on the CPU port */
err = dsa_8021q_vid_apply(ctx, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
@@ -471,8 +420,7 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan)
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id)
{
u16 vid, tci;
@@ -489,7 +437,6 @@ void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
*source_port = dsa_8021q_rx_source_port(vid);
*switch_id = dsa_8021q_rx_switch_id(vid);
*subvlan = dsa_8021q_rx_subvlan(vid);
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
@@ -41,9 +41,9 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int src_port, switch_id, subvlan;
int src_port, switch_id;
dsa_8021q_rcv(skb, &src_port, &switch_id, &subvlan);
dsa_8021q_rcv(skb, &src_port, &switch_id);
skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
if (!skb->dev)
@@ -358,20 +358,6 @@ static struct sk_buff
return skb;
}
static void sja1105_decode_subvlan(struct sk_buff *skb, u16 subvlan)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
struct sja1105_port *sp = dp->priv;
u16 vid = sp->subvlan_map[subvlan];
u16 vlan_tci;
if (vid == VLAN_N_VID)
return;
vlan_tci = (skb->priority << VLAN_PRIO_SHIFT) | vid;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
@@ -389,8 +375,8 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int source_port, switch_id, subvlan = 0;
struct sja1105_meta meta = {0};
int source_port, switch_id;
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
@@ -403,7 +389,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
if (sja1105_skb_has_tag_8021q(skb)) {
/* Normal traffic path. */
dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
dsa_8021q_rcv(skb, &source_port, &switch_id);
} else if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
@@ -428,9 +414,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
return NULL;
}
if (subvlan)
sja1105_decode_subvlan(skb, subvlan);
return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
is_meta);
}
@@ -538,7 +521,7 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
int source_port = -1, switch_id = -1, subvlan = 0;
int source_port = -1, switch_id = -1;
skb->offload_fwd_mark = 1;
@@ -551,7 +534,7 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
dsa_8021q_rcv(skb, &source_port, &switch_id);
skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
if (!skb->dev) {
@@ -561,9 +544,6 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
return NULL;
}
if (subvlan)
sja1105_decode_subvlan(skb, subvlan);
return skb;
}