Commit 798661c7 authored by David S. Miller

Merge branch 'mlxsw-unified-bridge-conversion-part-6'

Ido Schimmel says:

====================
mlxsw: Unified bridge conversion - part 6/6

This is the sixth and final part of the conversion of mlxsw to the
unified bridge model. It transitions the last bits of functionality that
were under the firmware's responsibility in the legacy model to the driver.
The last patches flip the driver to the unified bridge model and clean
up code that was used to make the conversion easier to review.

Patchset overview:

Patch #1 sets the egress VID for known unicast packets. For multicast
packets, the egress VID is configured using the MPE table. See commit
8c2da081 ("mlxsw: spectrum_fid: Configure egress VID classification
for multicast").

Patch #2 configures the VNI to FID classification that is used during
decapsulation.
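
A minimal sketch of that mapping, modeled on mlxsw_sp_fid_vni_to_fid_map()
in the diff below (fid and rif are the caller's FID and optional ingress
RIF):

    char svfa_pl[MLXSW_REG_SVFA_LEN];
    bool irif_valid = !!rif;
    int err;

    /* VNI -> FID entry consulted when decapsulating VXLAN packets. */
    mlxsw_reg_svfa_vni_pack(svfa_pl, true, fid->fid_index,
                            be32_to_cpu(fid->vni), irif_valid,
                            irif_valid ? mlxsw_sp_rif_index(rif) : 0);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);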

Patch #3 configures the ingress router interface (RIF) in FID classification
records, so that when a packet reaches the router block, its ingress RIF
is known. Care is taken to configure this in all the different flows
(e.g., RIF set on a FID, {Port, VID} joins a FID that already has a RIF
etc.).
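
For instance, a {Port, VID} => FID record now also carries the ingress
RIF; a sketch along the lines of __mlxsw_sp_fid_port_vid_map() in the
diff below (local_port, vid and fid come from the caller):

    char svfa_pl[MLXSW_REG_SVFA_LEN];
    bool irif_valid = !!fid->rif;
    u16 irif_index = irif_valid ? mlxsw_sp_rif_index(fid->rif) : 0;
    int err;

    /* The RIF recorded here is what the router block later sees as the
     * packet's ingress RIF.
     */
    mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, true, fid->fid_index,
                                 vid, irif_valid, irif_index);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);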

Patch #4 configures the egress VID for routed packets. For such packets,
the egress VID is not set by the MPE table or by an FDB record at the
egress bridge, but instead by a dedicated table that maps {Egress RIF,
Egress port} to a VID.
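
That table is programmed through the REIV register; a sketch following
mlxsw_sp_fid_erif_eport_to_vid_map_one() in the diff below (rif_index,
local_port and vid are the caller's egress RIF, port and VID):

    u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
    u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
    char *reiv_pl;
    int err;

    reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
    if (!reiv_pl)
            return -ENOMEM;

    /* One REIV write covers a page of ports for a single egress RIF;
     * each record maps {egress RIF, egress port} to an egress VID.
     */
    mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
    mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
    mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, vid);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
    kfree(reiv_pl);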

Patch #5 removes VID configuration from RIF creation, as the firmware no
longer needs it in the unified bridge model.

Patch #6 sets the egress FID in the RIF configuration so that the device
knows which FID to use when bridging the packet after routing.
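
For sub-port RIFs this is the eFID field of the RITR register; a sketch
of the call as used by mlxsw_sp_rif_subport_op() in the diff below
(rif, rif_subport, ritr_pl and err belong to the surrounding function):

    u16 efid = mlxsw_sp_fid_index(rif->fid);

    /* After routing, the device bridges the packet in 'efid'; the VID
     * argument is no longer needed in the unified bridge model, hence 0.
     */
    mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
                              rif_subport->lag ? rif_subport->lag_id :
                                                 rif_subport->system_port,
                              efid, 0);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);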

Patches #7-#9 add a new 802.1Q family and associated VLAN RIFs. In the
unified bridge model, we no longer need to emulate 802.1Q FIDs using
802.1D FIDs, as a VNI can be associated with both.
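
A VLAN RIF is then written with the RITR VLAN interface fields; a sketch
based on mlxsw_sp_rif_vlan_op() in the diff below (rif, vid and the
enable flag come from the caller; on Spectrum-2 and later efid is the
FID index, while Spectrum-1 passes 0):

    char ritr_pl[MLXSW_REG_RITR_LEN];
    int err;

    mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
                                rif->dev->mtu, rif->dev->dev_addr,
                                rif->mac_profile_id, vid, efid);
    err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);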

Patches #10-#11 finally flip the driver to the unified bridge model.

Patches #12-#13 clean up code that was used to make the conversion
easier to review.

v2:
* Fix build failure [1] in patch #1.

[1] https://lore.kernel.org/netdev/20220630201709.6e66a1bb@kernel.org/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0bf1fe6 88840d69
......@@ -633,6 +633,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
/* cmd_mbox_config_set_ubridge
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
......@@ -792,6 +798,13 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
/* cmd_mbox_config_profile_ubridge
* Unified Bridge
* 0 - non unified bridge
* 1 - unified bridge
*/
MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
* Valid for Spectrum only
......
......@@ -295,6 +295,7 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
used_ubridge:1,
used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_mid;
......@@ -314,6 +315,7 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
......
......@@ -1235,6 +1235,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
if (profile->used_ubridge) {
mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
profile->ubridge);
}
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
......
......@@ -380,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid_vid,
const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
......@@ -389,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
......@@ -454,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
......@@ -1655,40 +1658,43 @@ MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
static inline void __mlxsw_reg_svfa_pack(char *payload,
enum mlxsw_reg_svfa_mt mt, bool valid,
u16 fid)
u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
mlxsw_reg_svfa_swid_set(payload, 0);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
mlxsw_reg_svfa_irif_v_set(payload, irif_v);
mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
}
static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
bool valid, u16 fid, u16 vid)
bool valid, u16 fid, u16 vid,
bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
u16 vid)
u16 vid, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
u32 vni)
u32 vni, bool irif_v, u16 irif)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
__mlxsw_reg_svfa_pack(payload, mt, valid, fid);
__mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vni_set(payload, vni);
}
......@@ -1963,7 +1969,8 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
u16 fid_offset, bool flood_rsp,
enum mlxsw_reg_bridge_type bridge_type)
enum mlxsw_reg_bridge_type bridge_type,
bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
......@@ -1973,6 +1980,8 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_vv_set(payload, false);
mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
/* SPVMLR - Switch Port VLAN MAC Learning Register
......@@ -7107,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
u16 system_port, u16 vid)
u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
......
......@@ -3161,7 +3161,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_ports_create;
}
mlxsw_sp->ubridge = false;
return 0;
err_ports_create:
......@@ -3383,24 +3382,15 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_parsing_fini(mlxsw_sp);
}
/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
* 802.1Q FIDs
*/
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
VLAN_VID_MASK - 1)
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
.max_fid_flood_tables = 3,
.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_ubridge = 1,
.ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
......@@ -3414,17 +3404,14 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED,
.max_fid_flood_tables = 3,
.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_flood_mode = 1,
.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_ubridge = 1,
.ubridge = 1,
.swid_config = {
{
.used_type = 1,
......
......@@ -84,7 +84,7 @@ struct mlxsw_sp_upper {
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN_EMU,
MLXSW_SP_RIF_TYPE_VLAN,
MLXSW_SP_RIF_TYPE_FID,
MLXSW_SP_RIF_TYPE_IPIP_LB, /* IP-in-IP loopback. */
MLXSW_SP_RIF_TYPE_MAX,
......@@ -208,7 +208,6 @@ struct mlxsw_sp {
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
bool ubridge;
struct mlxsw_sp_pgt *pgt;
bool pgt_smpe_index_valid;
};
......@@ -737,6 +736,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
......@@ -1285,7 +1285,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
......@@ -1476,7 +1477,6 @@ void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
u16 count);
int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
u16 smpe, u16 local_port, bool member);
u16 mlxsw_sp_pgt_index_to_mid(const struct mlxsw_sp *mlxsw_sp, u16 pgt_index);
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
......
......@@ -83,7 +83,6 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
......@@ -94,6 +93,8 @@ struct mlxsw_sp_fid_ops {
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
......@@ -111,6 +112,7 @@ struct mlxsw_sp_fid_family {
bool flood_rsp;
enum mlxsw_reg_bridge_type bridge_type;
u16 pgt_base;
bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
......@@ -345,39 +347,20 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
char *sftr2_pl;
u16 mid_index;
int err;
if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
if (fid_family->mlxsw_sp->ubridge) {
mid_index = mlxsw_sp_fid_flood_table_mid(fid_family,
flood_table,
fid->fid_offset);
return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp,
mid_index, fid->fid_index,
local_port, member);
}
sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
if (!sftr2_pl)
return -ENOMEM;
mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
ops->flood_index(fid), flood_table->table_type, 1,
local_port, member);
err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
sftr2_pl);
kfree(sftr2_pl);
return err;
mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
fid->fid_offset);
return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
fid->fid_index, local_port, member);
}
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
......@@ -404,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
{
fid->rif = rif;
}
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
......@@ -439,7 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
fid->fid_offset = 0;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
......@@ -451,50 +429,338 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
enum mlxsw_reg_bridge_type bridge_type = 0;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
bool flood_rsp = false;
u16 smpe;
if (mlxsw_sp->ubridge) {
flood_rsp = fid->fid_family->flood_rsp;
bridge_type = fid->fid_family->bridge_type;
}
smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
fid->fid_offset, flood_rsp, bridge_type);
fid->fid_offset, fid->fid_family->flood_rsp,
fid->fid_family->bridge_type,
fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid)
static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
enum mlxsw_reg_bridge_type bridge_type = 0;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
bool flood_rsp = false;
u16 smpe;
if (mlxsw_sp->ubridge) {
flood_rsp = fid->fid_family->flood_rsp;
bridge_type = fid->fid_family->bridge_type;
}
smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
fid->fid_index, fid->fid_offset, flood_rsp,
bridge_type);
fid->fid_index, fid->fid_offset,
fid->fid_family->flood_rsp,
fid->fid_family->bridge_type,
fid->fid_family->smpe_index_valid, smpe);
mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
if (rif) {
mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif,
bool valid)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
bool irif_valid;
u16 irif_index;
irif_valid = !!rif;
irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
be32_to_cpu(fid->vni), irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
return mlxsw_sp_fid_edit_op(fid, rif);
}
static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
if (!fid->vni_valid)
return 0;
return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
}
static int
mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
bool irif_valid;
u16 irif_index;
irif_valid = !!rif;
irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int
mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
/* Update the global VID => FID mapping we created when the FID was
* configured.
*/
return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
}
static int
mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
struct mlxsw_sp_fid_port_vid *pv,
bool irif_valid, u16 irif_index)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
fid->fid_index, pv->vid, irif_valid,
irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
struct mlxsw_sp_fid_port_vid *pv;
u16 irif_index;
int err;
err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
if (err)
return err;
irif_index = mlxsw_sp_rif_index(rif);
list_for_each_entry(pv, &fid->port_vid_list, list) {
/* If port is not in virtual mode, then it does not have any
* {Port, VID}->FID mappings that need to be updated with the
* ingress RIF.
*/
if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
continue;
err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
true,
irif_index);
if (err)
goto err_port_vid_to_fid_rif_update_one;
}
return 0;
err_port_vid_to_fid_rif_update_one:
list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
continue;
mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
}
fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
return err;
}
static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
struct mlxsw_sp_fid_port_vid *pv;
list_for_each_entry(pv, &fid->port_vid_list, list) {
/* If port is not in virtual mode, then it does not have any
* {Port, VID}->FID mappings that need to be updated.
*/
if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
continue;
mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
}
fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
}
static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
bool valid, u8 port_page)
{
u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
struct mlxsw_sp_fid_port_vid *port_vid;
u8 rec_num, entries_num = 0;
char *reiv_pl;
int err;
reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
if (!reiv_pl)
return -ENOMEM;
mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
list_for_each_entry(port_vid, &fid->port_vid_list, list) {
/* port_vid_list is sorted by local_port. */
if (port_vid->local_port < local_port_start)
continue;
if (port_vid->local_port > local_port_end)
break;
rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
valid ? port_vid->vid : 0);
entries_num++;
}
if (!entries_num) {
kfree(reiv_pl);
return 0;
}
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
if (err)
goto err_reg_write;
kfree(reiv_pl);
return 0;
err_reg_write:
kfree(reiv_pl);
return err;
}
static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
u16 rif_index, bool valid)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
u8 num_port_pages;
int err, i;
num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
MLXSW_REG_REIV_REC_MAX_COUNT + 1;
for (i = 0; i < num_port_pages; i++) {
err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
if (err)
goto err_reiv_handle;
}
return 0;
err_reiv_handle:
for (; i >= 0; i--)
mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
return err;
}
int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
{
u16 rif_index = mlxsw_sp_rif_index(rif);
int err;
err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
if (err)
return err;
err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
if (err)
goto err_vni_to_fid_rif_update;
err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
if (err)
goto err_vid_to_fid_rif_set;
err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
if (err)
goto err_erif_eport_to_vid_map;
fid->rif = rif;
return 0;
err_erif_eport_to_vid_map:
mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
err_vid_to_fid_rif_set:
mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
err_vni_to_fid_rif_update:
mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
return err;
}
void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
{
u16 rif_index;
if (!fid->rif)
return;
rif_index = mlxsw_sp_rif_index(fid->rif);
fid->rif = NULL;
mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
}
static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
{
int err;
err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
if (err)
return err;
err = mlxsw_sp_fid_edit_op(fid, fid->rif);
if (err)
goto err_fid_edit_op;
return 0;
err_fid_edit_op:
mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
return err;
}
static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
bool irif_valid = false;
u16 irif_index = 0;
if (fid->rif) {
irif_valid = true;
irif_index = mlxsw_sp_rif_index(fid->rif);
}
mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
vid);
vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
......@@ -509,7 +775,7 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
fid->fid_offset = 0;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
......@@ -547,11 +813,6 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
{
return fid->fid_index - VLAN_N_VID;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
......@@ -663,6 +924,53 @@ mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
}
static int
mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
u16 rif_index = mlxsw_sp_rif_index(fid->rif);
char *reiv_pl;
int err;
reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
if (!reiv_pl)
return -ENOMEM;
mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
kfree(reiv_pl);
return err;
}
static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
u16 vid, bool valid)
{
int err;
err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
if (err)
return err;
if (!fid->rif)
return 0;
err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
valid);
if (err)
goto err_erif_eport_to_vid_map_one;
return 0;
err_erif_eport_to_vid_map_one:
mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
return err;
}
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
......@@ -676,11 +984,9 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
if (err)
return err;
if (fid->fid_family->mlxsw_sp->ubridge) {
err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, true);
if (err)
goto err_mpe_table_map;
}
err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
if (err)
goto err_fid_evid_map;
err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
vid);
......@@ -699,9 +1005,8 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
err_port_vid_list_add:
if (fid->fid_family->mlxsw_sp->ubridge)
mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, false);
err_mpe_table_map:
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
err_fid_evid_map:
__mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
......@@ -717,29 +1022,28 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
if (fid->fid_family->mlxsw_sp->ubridge)
mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, false);
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
__mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
return mlxsw_sp_fid_edit_op(fid);
return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
mlxsw_sp_fid_edit_op(fid);
mlxsw_sp_fid_vni_op(fid);
}
static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
return mlxsw_sp_fid_edit_op(fid);
return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
mlxsw_sp_fid_edit_op(fid);
mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
......@@ -749,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
static int
mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
.flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
......@@ -763,44 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
#define MLXSW_SP_FID_8021Q_PGT_BASE 0
#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
.table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
/* Range and flood configuration must match mlxsw_config_profile */
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.type = MLXSW_SP_FID_TYPE_8021D,
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = VLAN_N_VID,
.end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
};
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
......@@ -816,41 +1114,6 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
.setup = mlxsw_sp_fid_8021q_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021q_compare,
.flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
.vni_clear = mlxsw_sp_fid_8021d_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
};
/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
MLXSW_SP_FID_8021Q_MAX - 1)
/* Range and flood configuration must match mlxsw_config_profile */
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
.type = MLXSW_SP_FID_TYPE_8021Q,
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_EMU_START,
.end_index = MLXSW_SP_FID_8021Q_EMU_END,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_VLAN_EMU,
.ops = &mlxsw_sp_fid_8021q_emu_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
};
static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
fid->fid_offset = 0;
......@@ -858,12 +1121,12 @@ static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
/* rFIDs are allocated by the device during init */
return 0;
return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
......@@ -897,9 +1160,23 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
if (err)
return err;
/* We only need to transition the port to virtual mode since
* {Port, VID} => FID is done by the firmware upon RIF creation.
/* Using legacy bridge model, we only need to transition the port to
* virtual mode since {Port, VID} => FID is done by the firmware upon
* RIF creation. Using unified bridge model, we need to map
* {Port, VID} => FID and map egress VID.
*/
err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
true);
if (err)
goto err_port_vid_map;
if (fid->rif) {
err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
vid, true);
if (err)
goto err_erif_eport_to_vid_map_one;
}
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
......@@ -910,6 +1187,12 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
if (fid->rif)
mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
false);
err_erif_eport_to_vid_map_one:
__mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
err_port_vid_map:
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
......@@ -924,6 +1207,11 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
if (fid->rif)
mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
false);
__mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
}
......@@ -947,6 +1235,13 @@ static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
WARN_ON_ONCE(1);
}
static int
mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif)
{
return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
.setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
......@@ -959,19 +1254,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
.vni_clear = mlxsw_sp_fid_rfid_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
};
#define MLXSW_SP_RFID_BASE (15 * 1024)
#define MLXSW_SP_RFID_MAX 1024
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
.type = MLXSW_SP_FID_TYPE_RFID,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = MLXSW_SP_RFID_BASE,
.end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
.rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
.ops = &mlxsw_sp_fid_rfid_ops,
.flood_rsp = true,
.vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
......@@ -1035,26 +1318,220 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
.nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
};
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
int err;
err = mlxsw_sp_fid_op(fid, true);
if (err)
return err;
err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
if (err)
goto err_vid_to_fid_map;
return 0;
err_vid_to_fid_map:
mlxsw_sp_fid_op(fid, false);
return err;
}
static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
int err;
/* In case there are no {Port, VID} => FID mappings on the port,
* we can use the global VID => FID mapping we created when the
* FID was configured, otherwise, configure new mapping.
*/
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
if (err)
return err;
}
err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
if (err)
goto err_fid_evid_map;
err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
vid);
if (err)
goto err_port_vid_list_add;
return 0;
err_port_vid_list_add:
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
err_fid_evid_map:
if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
__mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
return err;
}
static void
mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
__mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
.setup = mlxsw_sp_fid_8021q_setup,
.configure = mlxsw_sp_fid_8021q_configure,
.deconfigure = mlxsw_sp_fid_8021q_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021q_compare,
.port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
.vni_clear = mlxsw_sp_fid_8021d_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
};
/* There are 4K-2 802.1Q FIDs */
#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
MLXSW_SP_FID_8021Q_MAX - 1)
/* There are 1K 802.1D FIDs */
#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
MLXSW_SP_FID_8021D_MAX - 1)
/* There is one dummy FID */
#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
/* There are 11K rFIDs */
#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
MLXSW_SP_FID_RFID_MAX - 1)
static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
.type = MLXSW_SP_FID_TYPE_8021Q,
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
.ops = &mlxsw_sp_fid_8021q_ops,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
.smpe_index_valid = false,
};
static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
.type = MLXSW_SP_FID_TYPE_8021D,
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
.smpe_index_valid = false,
};
static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = VLAN_N_VID - 1,
.end_index = VLAN_N_VID - 1,
.start_index = MLXSW_SP_FID_DUMMY,
.end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
.smpe_index_valid = false,
};
static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
.type = MLXSW_SP_FID_TYPE_RFID,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = MLXSW_SP_RFID_START,
.end_index = MLXSW_SP_RFID_END,
.rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
.ops = &mlxsw_sp_fid_rfid_ops,
.flood_rsp = true,
.smpe_index_valid = false,
};
const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
.type = MLXSW_SP_FID_TYPE_8021Q,
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
.ops = &mlxsw_sp_fid_8021q_ops,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
.smpe_index_valid = true,
};
static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
.type = MLXSW_SP_FID_TYPE_8021D,
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
.flood_tables = mlxsw_sp_fid_8021d_flood_tables,
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
.smpe_index_valid = true,
};
static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = MLXSW_SP_FID_DUMMY,
.end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
.smpe_index_valid = false,
};
const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
......@@ -1186,8 +1663,8 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
u16 mid_base, num_fids, table_index;
const int *sfgc_packet_types;
u16 num_fids, mid_base;
int err, i;
mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
......@@ -1203,12 +1680,8 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
if (!sfgc_packet_types[i])
continue;
mid_base = mlxsw_sp->ubridge ? mid_base : 0;
table_index = mlxsw_sp->ubridge ? 0 : flood_table->table_index;
mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
flood_table->table_type, table_index,
mid_base);
flood_table->table_type, 0, mid_base);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
......@@ -1218,7 +1691,6 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
return 0;
err_reg_write:
mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
return err;
}
......
......@@ -182,16 +182,6 @@ static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
}
#define MLXSW_SP_FID_PGT_FLOOD_ENTRIES 15354 /* Reserved for flooding. */
u16 mlxsw_sp_pgt_index_to_mid(const struct mlxsw_sp *mlxsw_sp, u16 pgt_index)
{
if (mlxsw_sp->ubridge)
return pgt_index;
return pgt_index - MLXSW_SP_FID_PGT_FLOOD_ENTRIES;
}
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
bool member)
{
......@@ -204,21 +194,16 @@ mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_pgt_entry *pgt_entry,
u16 local_port, bool member)
{
bool smpe_index_valid;
char *smid2_pl;
u16 smpe, mid;
int err;
smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
if (!smid2_pl)
return -ENOMEM;
smpe_index_valid = mlxsw_sp->ubridge ? mlxsw_sp->pgt->smpe_index_valid :
false;
smpe = mlxsw_sp->ubridge ? pgt_entry->smpe_index : 0;
mid = mlxsw_sp_pgt_index_to_mid(mlxsw_sp, pgt_entry->index);
mlxsw_reg_smid2_pack(smid2_pl, mid, 0, 0, smpe_index_valid, smpe);
mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
mlxsw_sp->pgt->smpe_index_valid,
pgt_entry->smpe_index);
mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
......
......@@ -7730,7 +7730,7 @@ u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
/* We only return the VID for VLAN RIFs. Otherwise we return an
* invalid value (0).
*/
if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN_EMU)
if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
goto out;
vid = mlxsw_sp_fid_8021q_vid(rif->fid);
......@@ -9316,17 +9316,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
rif_subport->vid);
efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
......@@ -9351,9 +9352,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
mlxsw_sp_fid_rif_set(rif->fid, rif);
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
......@@ -9365,7 +9372,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
......@@ -9442,9 +9449,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
mlxsw_sp_fid_rif_set(rif->fid, rif);
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
......@@ -9464,7 +9477,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
......@@ -9549,11 +9562,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN_EMU,
static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
bool enable)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
rif->dev->mtu, rif->dev->dev_addr,
rif->mac_profile_id, vid, efid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
struct netlink_ext_ack *extack)
{
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u8 mac_profile;
int err;
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
&mac_profile, extack);
if (err)
return err;
rif->mac_profile_id = mac_profile;
err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
if (err)
goto err_rif_vlan_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
if (err)
goto err_fid_mc_flood_set;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), true);
if (err)
goto err_fid_bc_flood_set;
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
goto err_rif_fdb_op;
err = mlxsw_sp_fid_rif_set(rif->fid, rif);
if (err)
goto err_fid_rif_set;
return 0;
err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
err_rif_vlan_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
mlxsw_sp_fid_rif_unset(rif->fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.configure = mlxsw_sp1_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
u16 efid = mlxsw_sp_fid_index(rif->fid);
return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp2_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
......@@ -9628,7 +9749,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN_EMU] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
......@@ -9816,7 +9937,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
[MLXSW_SP_RIF_TYPE_VLAN_EMU] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
......
......@@ -82,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
......
......@@ -1681,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
const char *mac, u16 fid, u16 vid,
bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
......@@ -1694,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
......@@ -1709,18 +1711,18 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
const char *mac, u16 fid, u16 vid,
bool adding, bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_NOP,
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
......@@ -1782,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
fdb_info->addr, fid_index,
fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
......@@ -1796,7 +1798,6 @@ static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
bool adding)
{
char *sfd_pl;
u16 mid_idx;
u8 num_rec;
int err;
......@@ -1804,11 +1805,10 @@ static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
if (!sfd_pl)
return -ENOMEM;
mid_idx = mlxsw_sp_pgt_index_to_mid(mlxsw_sp, mdb_entry->mid);
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
mid_idx);
mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
......@@ -2906,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
u16 local_port;
u16 vid, fid;
bool do_notification = true;
int err;
......@@ -2940,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
......