Commit 7b68b836 authored by David S. Miller's avatar David S. Miller

Merge branch 'mlxsw-Preparations-for-VxLAN-support'

Ido Schimmel says:

====================
mlxsw: Preparations for VxLAN support

This patchset prepares mlxsw for VxLAN support. It contains small and
mostly non-functional changes.

The first eight patches perform small changes in the code to make it
more receptive towards the actual VxLAN changes in the next patchset.

Patches 9-17 add the registers used to configure the device for VxLAN
offload.

Last two patches add the required resources and trap IDs.

The next patchset is available here [1].

1. https://github.com/idosch/linux/tree/vxlan
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents df92062e b02597d5
...@@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type { ...@@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type {
MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC,
}; };
/* reg_sfd_rec_type /* reg_sfd_rec_type
...@@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, ...@@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
} }
/* reg_sfd_uc_tunnel_uip_msb
 * When protocol is IPv4, the most significant byte of the underlay IPv4
 * destination IP.
 * When protocol is IPv6, reserved.
 * Access: RW
 */
MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24,
		     8, MLXSW_REG_SFD_REC_LEN, 0x08, false);

/* reg_sfd_uc_tunnel_fid
 * Filtering ID.
 * Access: Index
 */
MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
		     MLXSW_REG_SFD_REC_LEN, 0x08, false);

/* Underlay IP protocol of a "Unicast Tunnel" FDB record. */
enum mlxsw_reg_sfd_uc_tunnel_protocol {
	MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4,
	MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6,
};

/* reg_sfd_uc_tunnel_protocol
 * IP protocol.
 * Access: RW
 */
MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27,
		     1, MLXSW_REG_SFD_REC_LEN, 0x0C, false);

/* reg_sfd_uc_tunnel_uip_lsb
 * When protocol is IPv4, the least significant bytes of the underlay
 * IPv4 destination IP.
 * When protocol is IPv6, pointer to the underlay IPv6 destination IP
 * which is configured by RIPS.
 * Access: RW
 */
MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0,
		     24, MLXSW_REG_SFD_REC_LEN, 0x0C, false);

/* mlxsw_reg_sfd_uc_tunnel_pack - fill one SFD record of type
 * "Unicast Tunnel" (encapsulating FDB entry) at @rec_index.
 * @uip: underlay IPv4 destination address (for IPv6, per the field
 *	 comments above, the LSB part is a pointer configured by RIPS).
 */
static inline void
mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
			     enum mlxsw_reg_sfd_rec_policy policy,
			     const char *mac, u16 fid,
			     enum mlxsw_reg_sfd_rec_action action, u32 uip,
			     enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
{
	mlxsw_reg_sfd_rec_pack(payload, rec_index,
			       MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
			       action);
	mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
	/* The IP is split across two fields: the top byte (uip >> 24)
	 * goes to uip_msb, the low 24 bits to uip_lsb.
	 */
	mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
	mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
	mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
	mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
/* SFN - Switch FDB Notification Register /* SFN - Switch FDB Notification Register
* ------------------------------------------- * -------------------------------------------
* The switch provides notifications on newly learned FDB entries and * The switch provides notifications on newly learned FDB entries and
...@@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type { ...@@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type {
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_LAG, MLXSW_REG_SFDF_FLUSH_PER_LAG,
MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_NVE,
MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID,
}; };
/* reg_sfdf_flush_type /* reg_sfdf_flush_type
...@@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type { ...@@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type {
* 3 - All FID dynamic entries pointing to port are flushed. * 3 - All FID dynamic entries pointing to port are flushed.
* 4 - All dynamic entries pointing to LAG are flushed. * 4 - All dynamic entries pointing to LAG are flushed.
* 5 - All FID dynamic entries pointing to LAG are flushed. * 5 - All FID dynamic entries pointing to LAG are flushed.
* 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
* flushed.
* 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
* flushed, per FID.
* Access: RW * Access: RW
*/ */
MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
...@@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); ...@@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
*/ */
MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) /* reg_slcr_seed
* LAG seed value. The seed is the same for all ports.
* Access: RW
*/
MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32);
static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed)
{ {
MLXSW_REG_ZERO(slcr, payload); MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC); mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
mlxsw_reg_slcr_seed_set(payload, seed);
} }
/* SLCOR - Switch LAG Collector Register /* SLCOR - Switch LAG Collector Register
...@@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, ...@@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode); mlxsw_reg_mgpc_opcode_set(payload, opcode);
} }
/* MPRS - Monitoring Parsing State Register
 * ----------------------------------------
 * The MPRS register is used for setting up the parsing for hash,
 * policy-engine and routing.
 */
#define MLXSW_REG_MPRS_ID 0x9083
#define MLXSW_REG_MPRS_LEN 0x14

MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN);

/* reg_mprs_parsing_depth
 * Minimum parsing depth.
 * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL
 * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2.
 * Access: RW
 */
MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16);

/* reg_mprs_parsing_en
 * Parsing enable.
 * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and
 * NVGRE. Default is enabled. Reserved when SwitchX-2.
 * Access: RW
 */
MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16);

/* reg_mprs_vxlan_udp_dport
 * VxLAN UDP destination port.
 * Used for identifying VxLAN packets and for dport field in
 * encapsulation. Default is 4789.
 * Access: RW
 */
MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16);

/* mlxsw_reg_mprs_pack - set parsing depth and the VxLAN UDP destination
 * port. NVE parsing (bit 0 of parsing_en) is unconditionally enabled.
 */
static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
				       u16 vxlan_udp_dport)
{
	MLXSW_REG_ZERO(mprs, payload);
	mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth);
	mlxsw_reg_mprs_parsing_en_set(payload, true);
	mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
}
/* TNGCR - Tunneling NVE General Configuration Register
 * ----------------------------------------------------
 * The TNGCR register is used for setting up the NVE Tunneling configuration.
 */
#define MLXSW_REG_TNGCR_ID 0xA001
#define MLXSW_REG_TNGCR_LEN 0x44

MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN);

enum mlxsw_reg_tngcr_type {
	MLXSW_REG_TNGCR_TYPE_VXLAN,
	MLXSW_REG_TNGCR_TYPE_VXLAN_GPE,
	MLXSW_REG_TNGCR_TYPE_GENEVE,
	MLXSW_REG_TNGCR_TYPE_NVGRE,
};

/* reg_tngcr_type
 * Tunnel type for encapsulation and decapsulation. The types are mutually
 * exclusive.
 * Note: For Spectrum the NVE parsing must be enabled in MPRS.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4);

/* reg_tngcr_nve_valid
 * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1);

/* reg_tngcr_nve_ttl_uc
 * The TTL for NVE tunnel encapsulation underlay unicast packets.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8);

/* reg_tngcr_nve_ttl_mc
 * The TTL for NVE tunnel encapsulation underlay multicast packets.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8);

enum {
	/* Do not copy flow label. Calculate flow label using nve_flh. */
	MLXSW_REG_TNGCR_FL_NO_COPY,
	/* Copy flow label from inner packet if packet is IPv6 and
	 * encapsulation is by IPv6. Otherwise, calculate flow label using
	 * nve_flh.
	 */
	MLXSW_REG_TNGCR_FL_COPY,
};

/* reg_tngcr_nve_flc
 * For NVE tunnel encapsulation: Flow label copy from inner packet.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1);

enum {
	/* Flow label is static. In Spectrum this means '0'. Spectrum-2
	 * uses {nve_fl_prefix, nve_fl_suffix}.
	 */
	MLXSW_REG_TNGCR_FL_NO_HASH,
	/* 8 LSBs of the flow label are calculated from ECMP hash of the
	 * inner packet. 12 MSBs are configured by nve_fl_prefix.
	 */
	MLXSW_REG_TNGCR_FL_HASH,
};

/* reg_tngcr_nve_flh
 * NVE flow label hash.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1);

/* reg_tngcr_nve_fl_prefix
 * NVE flow label prefix. Constant 12 MSBs of the flow label.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12);

/* reg_tngcr_nve_fl_suffix
 * NVE flow label suffix. Constant 8 LSBs of the flow label.
 * Reserved when nve_flh=1 and for Spectrum.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8);

enum {
	/* Source UDP port is fixed (default '0') */
	MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH,
	/* Source UDP port is calculated based on hash */
	MLXSW_REG_TNGCR_UDP_SPORT_HASH,
};

/* reg_tngcr_nve_udp_sport_type
 * NVE UDP source port type.
 * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2).
 * When the source UDP port is calculated based on hash, then the 8 LSBs
 * are calculated from hash, and the 8 MSBs are configured by
 * nve_udp_sport_prefix.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1);

/* reg_tngcr_nve_udp_sport_prefix
 * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port.
 * Reserved when NVE type is NVGRE.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8);

/* reg_tngcr_nve_group_size_mc
 * The amount of sequential linked lists of MC entries. The first linked
 * list is configured by SFD.underlay_mc_ptr.
 * Valid values: 1, 2, 4, 8, 16, 32, 64
 * The linked lists are configured by TNUMT.
 * The hash is set by LAG hash.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8);

/* reg_tngcr_nve_group_size_flood
 * The amount of sequential linked lists of flooding entries. The first
 * linked list is configured by SFMR.nve_tunnel_flood_ptr
 * Valid values: 1, 2, 4, 8, 16, 32, 64
 * The linked lists are configured by TNUMT.
 * The hash is set by LAG hash.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8);

/* reg_tngcr_learn_enable
 * During decapsulation, whether to learn from NVE port.
 * Reserved when Spectrum-2. See TNPC.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1);

/* reg_tngcr_underlay_virtual_router
 * Underlay virtual router.
 * Reserved when Spectrum-2.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16);

/* reg_tngcr_underlay_rif
 * Underlay ingress router interface. RIF type should be loopback generic.
 * Reserved when Spectrum.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16);

/* reg_tngcr_usipv4
 * Underlay source IPv4 address of the NVE.
 * Access: RW
 */
MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32);

/* reg_tngcr_usipv6
 * Underlay source IPv6 address of the NVE. For Spectrum, must not be
 * modified under traffic of NVE tunneling encapsulation.
 * Access: RW
 */
MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16);

/* mlxsw_reg_tngcr_pack - common TNGCR configuration.
 * The same TTL is used for underlay unicast and multicast packets.
 * Flow label copy and hash are disabled, the UDP source port is
 * hash-based with a zero prefix, and a single MC/flood linked list
 * (group size 1) is used.
 */
static inline void mlxsw_reg_tngcr_pack(char *payload,
					enum mlxsw_reg_tngcr_type type,
					bool valid, u8 ttl)
{
	MLXSW_REG_ZERO(tngcr, payload);
	mlxsw_reg_tngcr_type_set(payload, type);
	mlxsw_reg_tngcr_nve_valid_set(payload, valid);
	mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl);
	mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl);
	mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY);
	mlxsw_reg_tngcr_nve_flh_set(payload, 0);
	mlxsw_reg_tngcr_nve_udp_sport_type_set(payload,
					       MLXSW_REG_TNGCR_UDP_SPORT_HASH);
	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0);
	mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1);
	mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1);
}
/* TNUMT - Tunneling NVE Underlay Multicast Table Register
 * -------------------------------------------------------
 * The TNUMT register is for building the underlay MC table. It is used
 * for MC, flooding and BC traffic into the NVE tunnel.
 */
#define MLXSW_REG_TNUMT_ID 0xA003
#define MLXSW_REG_TNUMT_LEN 0x20

MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN);

enum mlxsw_reg_tnumt_record_type {
	MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
	MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
	MLXSW_REG_TNUMT_RECORD_TYPE_LABEL,
};

/* reg_tnumt_record_type
 * Record type.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);

enum mlxsw_reg_tnumt_tunnel_port {
	MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
	MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
	MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
	MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
};

/* reg_tnumt_tunnel_port
 * Tunnel port.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4);

/* reg_tnumt_underlay_mc_ptr
 * Index to the underlay multicast table.
 * For Spectrum the index is to the KVD linear.
 * Access: Index
 */
MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24);

/* reg_tnumt_vnext
 * The next_underlay_mc_ptr is valid.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1);

/* reg_tnumt_next_underlay_mc_ptr
 * The next index to the underlay multicast table.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24);

/* reg_tnumt_record_size
 * Number of IP addresses in the record.
 * Range is 1..cap_max_nve_mc_entries_ipv{4,6}
 * Access: RW
 */
MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3);

/* reg_tnumt_udip
 * The underlay IPv4 addresses. udip[i] is reserved if i >= size
 * Note: udip and udip_ptr overlay the same payload area (offset 0x0C);
 * which one applies is determined by record_type.
 * Access: RW
 */
MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false);

/* reg_tnumt_udip_ptr
 * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if
 * i >= size. The IPv6 addresses are configured by RIPS.
 * Access: RW
 */
MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);

/* mlxsw_reg_tnumt_pack - fill the fixed part of a TNUMT record.
 * The per-index udip/udip_ptr entries (up to @record_size) must be set
 * separately by the caller via the indexed setters above.
 */
static inline void mlxsw_reg_tnumt_pack(char *payload,
					enum mlxsw_reg_tnumt_record_type type,
					enum mlxsw_reg_tnumt_tunnel_port tport,
					u32 underlay_mc_ptr, bool vnext,
					u32 next_underlay_mc_ptr,
					u8 record_size)
{
	MLXSW_REG_ZERO(tnumt, payload);
	mlxsw_reg_tnumt_record_type_set(payload, type);
	mlxsw_reg_tnumt_tunnel_port_set(payload, tport);
	mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr);
	mlxsw_reg_tnumt_vnext_set(payload, vnext);
	mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr);
	mlxsw_reg_tnumt_record_size_set(payload, record_size);
}
/* TNQCR - Tunneling NVE QoS Configuration Register
 * ------------------------------------------------
 * The TNQCR register configures how QoS is set in encapsulation into the
 * underlay network.
 */
#define MLXSW_REG_TNQCR_ID 0xA010
#define MLXSW_REG_TNQCR_LEN 0x0C

MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN);

/* reg_tnqcr_enc_set_dscp
 * For encapsulation: How to set DSCP field:
 * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay
 * (outer) IP header. If there is no IP header, use TNQDR.dscp
 * 1 - Set the DSCP field as TNQDR.dscp
 * Access: RW
 */
MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1);

/* mlxsw_reg_tnqcr_pack - configure encapsulation QoS to copy the DSCP
 * from the overlay IP header (enc_set_dscp = 0).
 */
static inline void mlxsw_reg_tnqcr_pack(char *payload)
{
	MLXSW_REG_ZERO(tnqcr, payload);
	mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0);
}
/* TNQDR - Tunneling NVE QoS Default Register
 * ------------------------------------------
 * The TNQDR register configures the default QoS settings for NVE
 * encapsulation.
 */
#define MLXSW_REG_TNQDR_ID 0xA011
#define MLXSW_REG_TNQDR_LEN 0x08

MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);

/* reg_tnqdr_local_port
 * Local port number (receive port). CPU port is supported.
 * Access: Index
 */
MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);

/* reg_tnqdr_dscp
 * For encapsulation, the default DSCP.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);

/* mlxsw_reg_tnqdr_pack - set the default encapsulation DSCP for
 * @local_port to 0.
 */
static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
{
	MLXSW_REG_ZERO(tnqdr, payload);
	mlxsw_reg_tnqdr_local_port_set(payload, local_port);
	mlxsw_reg_tnqdr_dscp_set(payload, 0);
}
/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register
 * --------------------------------------------------------
 * The TNEEM register maps ECN of the IP header at the ingress to the
 * encapsulation to the ECN of the underlay network.
 */
#define MLXSW_REG_TNEEM_ID 0xA012
#define MLXSW_REG_TNEEM_LEN 0x0C

MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN);

/* reg_tneem_overlay_ecn
 * ECN of the IP header in the overlay network.
 * Access: Index
 */
MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2);

/* reg_tneem_underlay_ecn
 * ECN of the IP header in the underlay network.
 * Access: RW
 */
MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2);

/* mlxsw_reg_tneem_pack - map one overlay ECN value (the index) to the
 * underlay ECN value used on encapsulation.
 */
static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn,
					u8 underlay_ecn)
{
	MLXSW_REG_ZERO(tneem, payload);
	mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn);
	mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn);
}
/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register
 * --------------------------------------------------------
 * The TNDEM register configures the actions that are done in the
 * decapsulation.
 */
#define MLXSW_REG_TNDEM_ID 0xA013
#define MLXSW_REG_TNDEM_LEN 0x0C

MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN);

/* reg_tndem_underlay_ecn
 * ECN field of the IP header in the underlay network.
 * Access: Index
 */
MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2);

/* reg_tndem_overlay_ecn
 * ECN field of the IP header in the overlay network.
 * Access: Index
 */
MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2);

/* reg_tndem_eip_ecn
 * Egress IP ECN. ECN field of the IP header of the packet which goes out
 * from the decapsulation.
 * Access: RW
 */
MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2);

/* reg_tndem_trap_en
 * Trap enable:
 * 0 - No trap due to decap ECN
 * 1 - Trap enable with trap_id
 * Access: RW
 */
MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4);

/* reg_tndem_trap_id
 * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
 * Reserved when trap_en is '0'.
 * Access: RW
 */
MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9);

/* mlxsw_reg_tndem_pack - map one (underlay ECN, overlay ECN) index pair
 * to the egress ECN and optional trap on decapsulation.
 * @trap_id: ignored by the device when @trap_en is false (see trap_id
 * field comment).
 */
static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
					u8 overlay_ecn, u8 ecn, bool trap_en,
					u16 trap_id)
{
	MLXSW_REG_ZERO(tndem, payload);
	mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn);
	mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn);
	mlxsw_reg_tndem_eip_ecn_set(payload, ecn);
	mlxsw_reg_tndem_trap_en_set(payload, trap_en);
	mlxsw_reg_tndem_trap_id_set(payload, trap_id);
}
/* TNPC - Tunnel Port Configuration Register
 * -----------------------------------------
 * The TNPC register is used for tunnel port configuration.
 * Reserved when Spectrum.
 */
#define MLXSW_REG_TNPC_ID 0xA020
#define MLXSW_REG_TNPC_LEN 0x18

MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);

enum mlxsw_reg_tnpc_tunnel_port {
	MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
	MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
	MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
	MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
};

/* reg_tnpc_tunnel_port
 * Tunnel port.
 * Access: Index
 */
MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4);

/* reg_tnpc_learn_enable_v6
 * During IPv6 underlay decapsulation, whether to learn from tunnel port.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);

/* reg_tnpc_learn_enable_v4
 * During IPv4 underlay decapsulation, whether to learn from tunnel port.
 * Access: RW
 */
MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);

/* mlxsw_reg_tnpc_pack - enable or disable learning on @tport.
 * The same @learn_enable value is applied to both the IPv4 and IPv6
 * underlay.
 */
static inline void mlxsw_reg_tnpc_pack(char *payload,
				       enum mlxsw_reg_tnpc_tunnel_port tport,
				       bool learn_enable)
{
	MLXSW_REG_ZERO(tnpc, payload);
	mlxsw_reg_tnpc_tunnel_port_set(payload, tport);
	mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable);
	mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable);
}
/* TIGCR - Tunneling IPinIP General Configuration Register /* TIGCR - Tunneling IPinIP General Configuration Register
* ------------------------------------------------------- * -------------------------------------------------------
* The TIGCR register is used for setting up the IPinIP Tunnel configuration. * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
...@@ -8828,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { ...@@ -8828,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc), MLXSW_REG(mcc),
MLXSW_REG(mcda), MLXSW_REG(mcda),
MLXSW_REG(mgpc), MLXSW_REG(mgpc),
MLXSW_REG(mprs),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
MLXSW_REG(tnqdr),
MLXSW_REG(tneem),
MLXSW_REG(tndem),
MLXSW_REG(tnpc),
MLXSW_REG(tigcr), MLXSW_REG(tigcr),
MLXSW_REG(sbpr), MLXSW_REG(sbpr),
MLXSW_REG(sbcm), MLXSW_REG(sbcm),
......
...@@ -46,6 +46,8 @@ enum mlxsw_res_id { ...@@ -46,6 +46,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES, MLXSW_RES_ID_MAX_LPM_TREES,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
/* Internal resources. /* Internal resources.
* Determined by the SW, not queried from the HW. * Determined by the SW, not queried from the HW.
...@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = { ...@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
}; };
struct mlxsw_res { struct mlxsw_res {
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/dcbnl.h> #include <linux/dcbnl.h>
#include <linux/inetdevice.h> #include <linux/inetdevice.h>
#include <linux/netlink.h> #include <linux/netlink.h>
#include <linux/random.h>
#include <net/switchdev.h> #include <net/switchdev.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_mirred.h>
...@@ -3469,6 +3470,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { ...@@ -3469,6 +3470,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */ /* PKT Sample trap */
...@@ -3482,6 +3484,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { ...@@ -3482,6 +3484,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
}; };
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
...@@ -3666,8 +3670,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) ...@@ -3666,8 +3670,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{ {
char slcr_pl[MLXSW_REG_SLCR_LEN]; char slcr_pl[MLXSW_REG_SLCR_LEN];
u32 seed;
int err; int err;
get_random_bytes(&seed, sizeof(seed));
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
...@@ -3676,7 +3682,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) ...@@ -3676,7 +3682,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_DIP | MLXSW_REG_SLCR_LAG_HASH_DIP |
MLXSW_REG_SLCR_LAG_HASH_SPORT | MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT | MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO); MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
if (err) if (err)
return err; return err;
......
...@@ -383,6 +383,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) ...@@ -383,6 +383,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif #endif
/* spectrum_router.c */ /* spectrum_router.c */
enum mlxsw_sp_l3proto {
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_L3_PROTO_IPV6,
#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
};
union mlxsw_sp_l3addr {
__be32 addr4;
struct in6_addr addr6;
};
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
...@@ -416,6 +427,10 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); ...@@ -416,6 +427,10 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev); struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */ /* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type { enum mlxsw_sp_kvdl_entry_type {
...@@ -423,6 +438,7 @@ enum mlxsw_sp_kvdl_entry_type { ...@@ -423,6 +438,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
}; };
static inline unsigned int static inline unsigned int
...@@ -433,6 +449,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) ...@@ -433,6 +449,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
default: default:
return 1; return 1;
} }
......
...@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { ...@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS), MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
}; };
#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) #define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
......
...@@ -7,17 +7,6 @@ ...@@ -7,17 +7,6 @@
#include "spectrum.h" #include "spectrum.h"
#include "reg.h" #include "reg.h"
enum mlxsw_sp_l3proto {
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_L3_PROTO_IPV6,
#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
};
union mlxsw_sp_l3addr {
__be32 addr4;
struct in6_addr addr6;
};
struct mlxsw_sp_rif_ipip_lb; struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config { struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
...@@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry; ...@@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop; struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry; struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index); u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
...@@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); ...@@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif, struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir, enum mlxsw_sp_rif_counter_dir dir,
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <net/switchdev.h> #include <net/switchdev.h>
#include "spectrum_span.h" #include "spectrum_span.h"
#include "spectrum_router.h"
#include "spectrum_switchdev.h" #include "spectrum_switchdev.h"
#include "spectrum.h" #include "spectrum.h"
#include "core.h" #include "core.h"
...@@ -2289,7 +2288,7 @@ struct mlxsw_sp_switchdev_event_work { ...@@ -2289,7 +2288,7 @@ struct mlxsw_sp_switchdev_event_work {
unsigned long event; unsigned long event;
}; };
static void mlxsw_sp_switchdev_event_work(struct work_struct *work) static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{ {
struct mlxsw_sp_switchdev_event_work *switchdev_work = struct mlxsw_sp_switchdev_event_work *switchdev_work =
container_of(work, struct mlxsw_sp_switchdev_event_work, work); container_of(work, struct mlxsw_sp_switchdev_event_work, work);
...@@ -2344,16 +2343,23 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, ...@@ -2344,16 +2343,23 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
{ {
struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_switchdev_event_work *switchdev_work; struct mlxsw_sp_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr; struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
struct net_device *br_dev;
if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) /* Tunnel devices are not our uppers, so check their master instead */
br_dev = netdev_master_upper_dev_get_rcu(dev);
if (!br_dev)
return NOTIFY_DONE;
if (!netif_is_bridge_master(br_dev))
return NOTIFY_DONE;
if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
return NOTIFY_DONE; return NOTIFY_DONE;
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work) if (!switchdev_work)
return NOTIFY_BAD; return NOTIFY_BAD;
INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
switchdev_work->dev = dev; switchdev_work->dev = dev;
switchdev_work->event = event; switchdev_work->event = event;
...@@ -2362,6 +2368,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, ...@@ -2362,6 +2368,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
case SWITCHDEV_FDB_DEL_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
INIT_WORK(&switchdev_work->work,
mlxsw_sp_switchdev_bridge_fdb_event_work);
memcpy(&switchdev_work->fdb_info, ptr, memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info)); sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
......
...@@ -24,6 +24,7 @@ enum { ...@@ -24,6 +24,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D, MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52, MLXSW_TRAP_ID_MTUERROR = 0x52,
...@@ -59,6 +60,7 @@ enum { ...@@ -59,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0, MLXSW_TRAP_ID_ACL0 = 0x1C0,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment