Commit d90a5215 authored by David S. Miller's avatar David S. Miller

Merge branch 'mlxsw-ERSPAN-Take-LACP-state-into-consideration'

Ido Schimmel says:

====================
mlxsw: ERSPAN: Take LACP state into consideration

Petr says:

When offloading mirror-to-gretap, mlxsw needs to preroute the path that
the encapsulated packet will take. That path may include a LAG device
above a front panel port. So far, mlxsw resolved the path to the first
up front panel slave of the LAG interface, but that only reflects
administrative state of the port. It neglects to consider whether the
port actually has a carrier, and what the LACP state is. This patch set
aims to address these problems.

Patch #1 publishes team_port_get_rcu().

Then in patch #2, a new function is introduced,
net_lag_port_dev_txable(). That returns, for a given netdevice that is a
slave of a LAG device, whether that device is "txable", i.e. whether the
LAG master would send traffic through it. Since there's no good place to
put LAG-wide helpers, introduce a new header include/net/lag.h.

Finally in patch #3, fix the slave selection logic to take into
consideration whether a given slave has a carrier and whether it is
txable.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 80fd2d6c b5de82f3
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <net/arp.h> #include <net/arp.h>
#include <net/gre.h> #include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h> #include <net/ndisc.h>
#include <net/ip6_tunnel.h> #include <net/ip6_tunnel.h>
...@@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev) ...@@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
struct list_head *iter; struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, dev, iter) netdev_for_each_lower_dev(lag_dev, dev, iter)
if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) if (netif_carrier_ok(dev) &&
net_lag_port_dev_txable(dev) &&
mlxsw_sp_port_dev_check(dev))
return dev; return dev;
return NULL; return NULL;
......
...@@ -41,11 +41,6 @@ ...@@ -41,11 +41,6 @@
#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
/* Fetch the struct team_port stored in the slave netdevice's
 * rx_handler_data. RCU-protected: caller must be inside an RCU
 * read-side critical section for the returned pointer to stay valid.
 */
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler_data);
}
static struct team_port *team_port_get_rtnl(const struct net_device *dev) static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{ {
struct team_port *port = rtnl_dereference(dev->rx_handler_data); struct team_port *port = rtnl_dereference(dev->rx_handler_data);
......
...@@ -74,6 +74,11 @@ struct team_port { ...@@ -74,6 +74,11 @@ struct team_port {
long mode_priv[0]; long mode_priv[0];
}; };
/* Fetch the struct team_port stored in the slave netdevice's
 * rx_handler_data. May return NULL if dev is not (or no longer) an
 * enslaved team port; caller must hold rcu_read_lock().
 */
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler_data);
}
static inline bool team_port_enabled(struct team_port *port) static inline bool team_port_enabled(struct team_port *port)
{ {
return port->index != -1; return port->index != -1;
...@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port) ...@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port); return port->linkup && team_port_enabled(port);
} }
/* team_port_dev_txable - would the team master transmit through this slave?
 * @port_dev: slave netdevice to query
 *
 * Returns true only if @port_dev is currently an enslaved team port that
 * is linked up and enabled (see team_port_txable()); false otherwise,
 * including when @port_dev is not a team port at all.
 */
static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable = false;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	if (port)
		txable = team_port_txable(port);
	rcu_read_unlock();

	return txable;
}
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port, static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb) struct sk_buff *skb)
......
...@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave) ...@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
bond_is_active_slave(slave); bond_is_active_slave(slave);
} }
static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
{
struct slave *slave;
bool active;
rcu_read_lock();
slave = bond_slave_get_rcu(slave_dev);
active = bond_is_active_slave(slave);
rcu_read_unlock();
return active;
}
static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
{ {
if (len == ETH_ALEN) { if (len == ETH_ALEN) {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_LAG_H
#define _LINUX_IF_LAG_H
#include <linux/netdevice.h>
#include <linux/if_team.h>
#include <net/bonding.h>
/* net_lag_port_dev_txable - check whether a LAG slave can be transmitted on.
 * @port_dev: slave netdevice to query
 *
 * Dispatches to the appropriate LAG implementation: team slaves are
 * answered by team_port_dev_txable(), everything else is treated as a
 * bonding slave and answered by bond_is_active_slave_dev().
 */
static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
{
	return netif_is_team_port(port_dev) ?
	       team_port_dev_txable(port_dev) :
	       bond_is_active_slave_dev(port_dev);
}
#endif /* _LINUX_IF_LAG_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment