Commit 5b60dadb authored by Tobias Waldekranz, committed by Jakub Kicinski

net: dsa: tag_dsa: Support reception of packets from LAG devices

Packets ingressing on a LAG that egress on the CPU port, which are not
classified as management, will have a FORWARD tag that does not
contain the normal source device/port tuple. Instead the trunk bit
will be set, and the port field holds the LAG id.
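
As a rough illustration (not part of the patch), the source fields of such
a FORWARD tag could be decoded in user space with the same masks that
dsa_rcv_ll() uses in the diff below; the sample tag bytes and the
standalone program are purely illustrative:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Made-up FORWARD tag bytes, just to exercise the masks. */
		uint8_t dsa_header[2] = { 0xc3, 0x42 };
		unsigned int source_device = dsa_header[0] & 0x1f;      /* switch (device) id */
		bool trunk = dsa_header[1] & 7;                          /* trunk indication */
		unsigned int source_port = (dsa_header[1] >> 3) & 0x1f; /* port, or LAG id if trunk */

		if (trunk)
			printf("FORWARD from LAG %u on device %u\n", source_port, source_device);
		else
			printf("FORWARD from port %u on device %u\n", source_port, source_device);
		return 0;
	}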

Since the exact source port information is not available in the tag,
frames are injected directly on the LAG interface and thus never pass
through any DSA port interface on ingress.

Management frames (TO_CPU) are not affected and will pass through the
DSA port interface as usual.
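
As a rough sketch of the resulting dispatch (again not part of the patch):
FORWARD frames carrying the trunk indication resolve to the LAG upper and
bypass the per-port handling, while everything else, including TO_CPU,
keeps going through the slave interface. The device names below are
illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	enum dsa_cmd { DSA_CMD_TO_CPU = 0, DSA_CMD_FORWARD = 3 };

	/* Toy model: pick the net_device a frame would be injected on. */
	static const char *rx_dev(enum dsa_cmd cmd, bool trunk)
	{
		if (cmd == DSA_CMD_FORWARD && trunk)
			return "bond0";	/* LAG upper, no DSA-port specific actions */
		return "lan1";		/* per-port DSA slave interface */
	}

	int main(void)
	{
		printf("FORWARD, trunk -> %s\n", rx_dev(DSA_CMD_FORWARD, true));
		printf("FORWARD        -> %s\n", rx_dev(DSA_CMD_FORWARD, false));
		printf("TO_CPU         -> %s\n", rx_dev(DSA_CMD_TO_CPU, false));
		return 0;
	}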
Signed-off-by: Tobias Waldekranz <tobias@waldekranz.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 57e661aa
@@ -219,11 +219,21 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	skb = nskb;
-	p = netdev_priv(skb->dev);
 	skb_push(skb, ETH_HLEN);
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
 
+	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
+		/* Packet is to be injected directly on an upper
+		 * device, e.g. a team/bond, so skip all DSA-port
+		 * specific actions.
+		 */
+		netif_rx(skb);
+		return 0;
+	}
+
+	p = netdev_priv(skb->dev);
+
 	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
 		nskb = dsa_untag_bridge_pvid(skb);
 		if (!nskb) {
@@ -163,6 +163,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
 			       u8 extra)
 {
 	int source_device, source_port;
+	bool trunk = false;
 	enum dsa_code code;
 	enum dsa_cmd cmd;
 	u8 *dsa_header;
@@ -174,6 +175,8 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
 	switch (cmd) {
 	case DSA_CMD_FORWARD:
 		skb->offload_fwd_mark = 1;
+
+		trunk = !!(dsa_header[1] & 7);
 		break;
 
 	case DSA_CMD_TO_CPU:
@@ -216,7 +219,19 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
 	source_device = dsa_header[0] & 0x1f;
 	source_port = (dsa_header[1] >> 3) & 0x1f;
 
-	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+	if (trunk) {
+		struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+		/* The exact source port is not available in the tag,
+		 * so we inject the frame directly on the upper
+		 * team/bond.
+		 */
+		skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
+	} else {
+		skb->dev = dsa_master_find_slave(dev, source_device,
+						 source_port);
+	}
+
 	if (!skb->dev)
 		return NULL;