Commit 28cfea98 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-05-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-05-31

net/mlx5: Support 4 ports VF LAG, part 1/2

This series continues the series[1] "Support 4 ports HCAs LAG mode"
by Mark Bloch. This series adds support for 4 ports VF LAG (single FDB
E-Switch).

This series of patches focuses on refactoring the sections of the code that
assume VF LAG supports only two ports; for instance, that each device can have
only one peer.

Patches 1-5:
- Refactor ETH handling of TC rules of eswitches with peers.
Patch 6:
- Refactor the peer miss group table.
Patches 7-9:
- Refactor single FDB E-Switch creation.
Patch 10:
- Refactor the DR layer.
Patches 11-14:
- Refactor the devcom layer.

Next series will refactor LAG layer and enable 4 ports VF LAG.
This series specifically allows HCAs with 4 ports to create a VF LAG
with only 4 ports. It is not possible to create a VF LAG with 2 or 3
ports using HCAs that have 4 ports.

Currently, the Merged E-Switch feature only supports HCAs with 2 ports.
However, upcoming patches will introduce support for HCAs with 4 ports.

In order to activate VF LAG, a user can execute:

devlink dev eswitch set pci/0000:08:00.0 mode switchdev
devlink dev eswitch set pci/0000:08:00.1 mode switchdev
devlink dev eswitch set pci/0000:08:00.2 mode switchdev
devlink dev eswitch set pci/0000:08:00.3 mode switchdev
ip link add name bond0 type bond
ip link set dev bond0 type bond mode 802.3ad
ip link set dev eth2 master bond0
ip link set dev eth3 master bond0
ip link set dev eth4 master bond0
ip link set dev eth5 master bond0

Where eth2, eth3, eth4 and eth5 are the net-interfaces of pci/0000:08:00.0,
pci/0000:08:00.1, pci/0000:08:00.2 and pci/0000:08:00.3, respectively.
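
The mapping between netdevs and PCI functions can be confirmed by reading each
interface's PCI device link, e.g. (illustrative; interface names differ per system):

readlink /sys/class/net/eth2/device   # expected to resolve to 0000:08:00.0
readlink /sys/class/net/eth5/device   # expected to resolve to 0000:08:00.3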

The user can verify the LAG state and type via debugfs:
/sys/kernel/debug/mlx5/0000\:08\:00.0/lag/state
/sys/kernel/debug/mlx5/0000\:08\:00.0/lag/type
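
For example (illustrative; the exact strings printed depend on the kernel version):

cat /sys/kernel/debug/mlx5/0000\:08\:00.0/lag/state
cat /sys/kernel/debug/mlx5/0000\:08\:00.0/lag/type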

[1]
https://lore.kernel.org/netdev/20220510055743.118828-1-saeedm@nvidia.com/

* tag 'mlx5-updates-2023-05-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Devcom, extend mlx5_devcom_send_event to work with more than two devices
  net/mlx5: Devcom, introduce devcom_for_each_peer_entry
  net/mlx5: E-switch, mark devcom as not ready when all eswitches are unpaired
  net/mlx5: Devcom, Rename paired to ready
  net/mlx5: DR, handle more than one peer domain
  net/mlx5: E-switch, generalize shared FDB creation
  net/mlx5: E-switch, Handle multiple master egress rules
  net/mlx5: E-switch, refactor FDB miss rule add/remove
  net/mlx5: E-switch, enlarge peer miss group table
  net/mlx5e: Handle offloads flows per peer
  net/mlx5e: en_tc, re-factor query route port
  net/mlx5e: rep, store send to vport rules per peer
  net/mlx5e: tc, Refactor peer add/del flow
  net/mlx5e: en_tc, Extend peer flows to a list
====================

Link: https://lore.kernel.org/r/20230602191301.47004-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents c422ac94 e2a82bf8
@@ -94,13 +94,13 @@ struct mlx5e_tc_flow {
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
-	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
-	struct list_head peer; /* flows with peer flow */
+	struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g
				   * due to missing route)
				   */
+	struct list_head peer_flows; /* flows on peer */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
......
@@ -374,7 +374,9 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
 {
	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
+	unsigned long i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;
@@ -382,31 +384,78 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
-		if (rep_sq->send_to_vport_rule_peer)
-			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
+		xa_for_each(&rep_sq->sq_peer, i, sq_peer) {
+			if (sq_peer->rule)
+				mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+			xa_erase(&rep_sq->sq_peer, i);
+			kfree(sq_peer);
+		}
+
+		xa_destroy(&rep_sq->sq_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
 }

+static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
+					   struct mlx5_devcom *devcom,
+					   struct mlx5e_rep_sq *rep_sq, int i)
+{
+	struct mlx5_eswitch *peer_esw = NULL;
+	struct mlx5_flow_handle *flow_rule;
+	int tmp;
+
+	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, tmp) {
+		int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
+		struct mlx5e_rep_sq_peer *sq_peer;
+		int err;
+
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer)
+			return -ENOMEM;
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+								rep, rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			kfree(sq_peer);
+			return PTR_ERR(flow_rule);
+		}
+
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
+		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
+		if (err) {
+			kfree(sq_peer);
+			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
+	struct mlx5_devcom *devcom;
+	bool devcom_locked = false;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

+	devcom = esw->dev->priv.devcom;
	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
-		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
-						     MLX5_DEVCOM_ESW_OFFLOADS);
+	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		devcom_locked = true;

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
@@ -426,31 +475,30 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

-		if (peer_esw) {
-			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
-									rep, sqns_array[i]);
-			if (IS_ERR(flow_rule)) {
-				err = PTR_ERR(flow_rule);
+		xa_init(&rep_sq->sq_peer);
+		if (devcom_locked) {
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			if (err) {
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+				xa_destroy(&rep_sq->sq_peer);
				kfree(rep_sq);
				goto out_err;
			}
-			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

 out_err:
	mlx5e_sqs2vport_stop(esw, rep);
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
	return err;
 }
@@ -1530,17 +1578,24 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
	return rpriv->netdev;
 }

-static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
+static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
+					 struct mlx5_eswitch *peer_esw)
 {
+	int i = mlx5_get_dev_index(peer_esw->dev);
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

+	WARN_ON_ONCE(!peer_esw);
	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
-		if (!rep_sq->send_to_vport_rule_peer)
+		struct mlx5e_rep_sq_peer *sq_peer = xa_load(&rep_sq->sq_peer, i);
+
+		if (!sq_peer || sq_peer->peer != peer_esw)
			continue;
-		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
-		rep_sq->send_to_vport_rule_peer = NULL;
+
+		mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+		xa_erase(&rep_sq->sq_peer, i);
+		kfree(sq_peer);
	}
 }

@@ -1548,24 +1603,52 @@ static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
 {
+	int i = mlx5_get_dev_index(peer_esw->dev);
	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_sq_peer *sq_peer;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
+	int err;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
-		if (rep_sq->send_to_vport_rule_peer)
+		sq_peer = xa_load(&rep_sq->sq_peer, i);
+		if (sq_peer && sq_peer->peer)
			continue;
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
-		if (IS_ERR(flow_rule))
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep,
+								rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
			goto err_out;
-		rep_sq->send_to_vport_rule_peer = flow_rule;
+		}
+
+		if (sq_peer) {
+			sq_peer->rule = flow_rule;
+			sq_peer->peer = peer_esw;
+			continue;
+		}
+
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer) {
+			err = -ENOMEM;
+			goto err_sq_alloc;
+		}
+		err = xa_insert(&rep_sq->sq_peer, i, sq_peer, GFP_KERNEL);
+		if (err)
+			goto err_xa;
+
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
	}

	return 0;

+err_xa:
+	kfree(sq_peer);
+err_sq_alloc:
+	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 err_out:
-	mlx5e_vport_rep_event_unpair(rep);
-	return PTR_ERR(flow_rule);
+	mlx5e_vport_rep_event_unpair(rep, peer_esw);
+	return err;
 }

 static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
@@ -1578,7 +1661,7 @@ static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
		err = mlx5e_vport_rep_event_pair(esw, rep, data);
	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
-		mlx5e_vport_rep_event_unpair(rep);
+		mlx5e_vport_rep_event_unpair(rep, data);

	return err;
 }
......
@@ -225,9 +225,14 @@ struct mlx5e_encap_entry {
	struct rcu_head rcu;
 };

+struct mlx5e_rep_sq_peer {
+	struct mlx5_flow_handle *rule;
+	void *peer;
+};
+
 struct mlx5e_rep_sq {
	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct mlx5_flow_handle *send_to_vport_rule_peer;
+	struct xarray sq_peer;
	u32 sqn;
	struct list_head	 list;
 };
......
@@ -15,13 +15,27 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
	vport->egress.offloads.fwd_rule = NULL;
 }

-static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport)
+void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index)
 {
-	if (!vport->egress.offloads.bounce_rule)
+	struct mlx5_flow_handle *bounce_rule =
+		xa_load(&vport->egress.offloads.bounce_rules, rule_index);
+
+	if (!bounce_rule)
		return;

-	mlx5_del_flow_rules(vport->egress.offloads.bounce_rule);
-	vport->egress.offloads.bounce_rule = NULL;
+	mlx5_del_flow_rules(bounce_rule);
+	xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
+}
+
+static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport)
+{
+	struct mlx5_flow_handle *bounce_rule;
+	unsigned long i;
+
+	xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) {
+		mlx5_del_flow_rules(bounce_rule);
+		xa_erase(&vport->egress.offloads.bounce_rules, i);
+	}
 }

 static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
@@ -96,7 +110,7 @@ static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
 {
	esw_acl_egress_vlan_destroy(vport);
	esw_acl_egress_ofld_fwd2vport_destroy(vport);
-	esw_acl_egress_ofld_bounce_rule_destroy(vport);
+	esw_acl_egress_ofld_bounce_rules_destroy(vport);
 }

 static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -194,6 +208,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
		vport->egress.acl = NULL;
		return err;
	}
+	vport->egress.type = VPORT_EGRESS_ACL_TYPE_DEFAULT;

	err = esw_acl_egress_ofld_groups_create(esw, vport);
	if (err)
......
@@ -10,6 +10,7 @@
 /* Eswitch acl egress external APIs */
 int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
+void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index);
 int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num);
 int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
......
@@ -647,22 +647,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
 }

 static struct mlx5_flow_handle *
-mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
+mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
+					 const unsigned char *addr,
					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					 struct mlx5_esw_bridge *bridge)
 {
	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;

-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return ERR_PTR(-ENODEV);

+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
							      bridge, peer_esw);

-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return handle;
 }

@@ -1369,8 +1382,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
	entry->ingress_counter = counter;

	handle = peer ?
-		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
-							 mlx5_fc_id(counter), bridge) :
+		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
+							 addr, vlan, mlx5_fc_id(counter),
+							 bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
......
@@ -540,16 +540,29 @@ static struct mlx5_flow_handle *
 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
 {
	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;

-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return ERR_PTR(-ENODEV);

+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return handle;
 }
......
@@ -123,8 +123,14 @@ struct vport_ingress {
	} offloads;
 };

+enum vport_egress_acl_type {
+	VPORT_EGRESS_ACL_TYPE_DEFAULT,
+	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
+};
+
 struct vport_egress {
	struct mlx5_flow_table *acl;
+	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
@@ -136,7 +142,7 @@ struct vport_egress {
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
-			struct mlx5_flow_handle *bounce_rule;
+			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
@@ -218,7 +224,7 @@ struct mlx5_eswitch_fdb {
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
-			struct mlx5_flow_handle **peer_miss_rules;
+			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
@@ -249,7 +255,7 @@ struct mlx5_esw_offload {
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct xarray vport_reps;
-	struct list_head peer_flows;
+	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
@@ -337,6 +343,7 @@ struct mlx5_eswitch {
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
+	u8                      num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
@@ -578,6 +585,13 @@ mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
	return esw->manager_vport == vport_num;
 }

+static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
+				     u16 esw_owner_vhca_id)
+{
+	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
+		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
+}
+
 static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
 {
	return mlx5_core_is_ecpf_esw_manager(dev) ?
@@ -748,9 +762,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport);

 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

-int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
-					    struct mlx5_eswitch *slave_esw);
-void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
-					      struct mlx5_eswitch *slave_esw);
+int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+					     struct mlx5_eswitch *slave_esw, int max_slaves);
+void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+					      struct mlx5_eswitch *slave_esw);
 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
@@ -802,14 +816,14 @@ mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
 }

 static inline int
-mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
-					struct mlx5_eswitch *slave_esw)
+mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+					 struct mlx5_eswitch *slave_esw, int max_slaves)
 {
	return 0;
 }

 static inline void
-mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
-					 struct mlx5_eswitch *slave_esw) {}
+mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+					 struct mlx5_eswitch *slave_esw) {}

 static inline int
......
@@ -139,7 +139,8 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
 }

 static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
-				  struct mlx5_flow_root_namespace *peer_ns)
+				  struct mlx5_flow_root_namespace *peer_ns,
+				  u8 peer_idx)
 {
	return 0;
 }
......
@@ -93,7 +93,8 @@ struct mlx5_flow_cmds {
			       struct mlx5_modify_hdr *modify_hdr);

	int (*set_peer)(struct mlx5_flow_root_namespace *ns,
-			struct mlx5_flow_root_namespace *peer_ns);
+			struct mlx5_flow_root_namespace *peer_ns,
+			u8 peer_idx);

	int (*create_ns)(struct mlx5_flow_root_namespace *ns);
	int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
......
@@ -3620,7 +3620,8 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
 }

 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
-				 struct mlx5_flow_root_namespace *peer_ns)
+				 struct mlx5_flow_root_namespace *peer_ns,
+				 u8 peer_idx)
 {
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
@@ -3628,7 +3629,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
		return -EINVAL;
	}

-	return ns->cmds->set_peer(ns, peer_ns);
+	return ns->cmds->set_peer(ns, peer_ns, peer_idx);
 }

 /* This function should be called only at init stage of the namespace.
......
@@ -295,7 +295,8 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);

 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
-				 struct mlx5_flow_root_namespace *peer_ns);
+				 struct mlx5_flow_root_namespace *peer_ns,
+				 u8 peer_idx);
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode);
......
@@ -550,6 +550,29 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
	}
 }

+static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+	int err;
+	int i;
+
+	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+		struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
+
+		err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
+							       slave_esw, ldev->ports);
+		if (err)
+			goto err;
+	}
+	return 0;
+err:
+	for (; i > MLX5_LAG_P1; i--)
+		mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+							 ldev->pf[i].dev->priv.eswitch);
+	return err;
+}
+
 static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
@@ -557,7 +580,6 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
 {
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;
@@ -575,8 +597,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
	}

	if (shared_fdb) {
-		err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
-							      dev1->priv.eswitch);
+		err = mlx5_lag_create_single_fdb(ldev);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
@@ -647,19 +668,21 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
 int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 {
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	unsigned long flags = ldev->mode_flags;
	int err;
+	int i;

	ldev->mode = MLX5_LAG_MODE_NONE;
	ldev->mode_flags = 0;
	mlx5_lag_mp_reset(ldev);

	if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
-		mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
-							 dev1->priv.eswitch);
+		for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+			mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+								 ldev->pf[i].dev->priv.eswitch);
		clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	}
@@ -801,8 +824,8 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
	    is_mdev_switchdev_mode(dev1) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
-	    mlx5_devcom_is_paired(dev0->priv.devcom,
-				  MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_devcom_comp_is_ready(dev0->priv.devcom,
+				      MLX5_DEVCOM_ESW_OFFLOADS) &&
	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
......
@@ -19,7 +19,7 @@ struct mlx5_devcom_component {
	mlx5_devcom_event_handler_t handler;
	struct rw_semaphore sem;
-	bool paired;
+	bool ready;
 };

 struct mlx5_devcom_list {
@@ -193,7 +193,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,

 int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
			   enum mlx5_devcom_components id,
-			   int event,
+			   int event, int rollback_event,
			   void *event_data)
 {
	struct mlx5_devcom_component *comp;
@@ -210,84 +210,134 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
		if (i != devcom->idx && data) {
			err = comp->handler(event, data, event_data);
-			break;
+			if (err)
+				goto rollback;
		}
	}

+	up_write(&comp->sem);
+	return 0;
+
+rollback:
+	while (i--) {
+		void *data = rcu_dereference_protected(comp->device[i].data,
+						       lockdep_is_held(&comp->sem));
+
+		if (i != devcom->idx && data)
+			comp->handler(rollback_event, data, event_data);
+	}
+
	up_write(&comp->sem);
	return err;
 }

-void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
-			    enum mlx5_devcom_components id,
-			    bool paired)
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
+				enum mlx5_devcom_components id,
+				bool ready)
 {
	struct mlx5_devcom_component *comp;

	comp = &devcom->priv->components[id];
	WARN_ON(!rwsem_is_locked(&comp->sem));

-	WRITE_ONCE(comp->paired, paired);
+	WRITE_ONCE(comp->ready, ready);
 }

-bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
-			   enum mlx5_devcom_components id)
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
+			       enum mlx5_devcom_components id)
 {
	if (IS_ERR_OR_NULL(devcom))
		return false;

-	return READ_ONCE(devcom->priv->components[id].paired);
+	return READ_ONCE(devcom->priv->components[id].ready);
 }

-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id)
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id)
 {
	struct mlx5_devcom_component *comp;
-	int i;

	if (IS_ERR_OR_NULL(devcom))
-		return NULL;
+		return false;

	comp = &devcom->priv->components[id];
	down_read(&comp->sem);
-	if (!READ_ONCE(comp->paired)) {
+	if (!READ_ONCE(comp->ready)) {
		up_read(&comp->sem);
-		return NULL;
+		return false;
	}

-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (i != devcom->idx)
-			break;
+	return true;
+}

-	return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
+				   enum mlx5_devcom_components id)
+{
+	struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+
+	up_read(&comp->sem);
 }

-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id,
+				     int *i)
 {
	struct mlx5_devcom_component *comp;
-	int i;
+	void *ret;
+	int idx;

-	if (IS_ERR_OR_NULL(devcom))
-		return NULL;
+	comp = &devcom->priv->components[id];

-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (i != devcom->idx)
-			break;
+	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+		return NULL;
+	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+		if (idx != devcom->idx) {
+			ret = rcu_dereference_protected(comp->device[idx].data,
+							lockdep_is_held(&comp->sem));
+			if (ret)
+				break;
+		}
+	}

-	comp = &devcom->priv->components[id];
-	/* This can change concurrently, however 'data' pointer will remain
-	 * valid for the duration of RCU read section.
-	 */
-	if (!READ_ONCE(comp->paired))
+	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+		*i = idx;
		return NULL;
+	}

-	return rcu_dereference(comp->device[i].data);
+	*i = idx + 1;
+
+	return ret;
 }

-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+					 enum mlx5_devcom_components id,
+					 int *i)
 {
-	struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+	struct mlx5_devcom_component *comp;
+	void *ret;
+	int idx;

-	up_read(&comp->sem);
+	comp = &devcom->priv->components[id];
+
+	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+		return NULL;
+	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+		if (idx != devcom->idx) {
+			/* This can change concurrently, however 'data' pointer will remain
+			 * valid for the duration of RCU read section.
+			 */
+			if (!READ_ONCE(comp->ready))
+				return NULL;
+			ret = rcu_dereference(comp->device[idx].data);
+			if (ret)
+				break;
+		}
+	}
+
+	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+		*i = idx;
+		return NULL;
+	}
+
+	*i = idx + 1;
+
+	return ret;
 }
@@ -30,20 +30,33 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,

 int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
			   enum mlx5_devcom_components id,
-			   int event,
+			   int event, int rollback_event,
			   void *event_data);

-void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
-			    enum mlx5_devcom_components id,
-			    bool paired);
-bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
-			   enum mlx5_devcom_components id);
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
+				enum mlx5_devcom_components id,
+				bool ready);
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
+			       enum mlx5_devcom_components id);

-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id);
-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id);
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
+				   enum mlx5_devcom_components id);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id, int *i);

-#endif
+#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \
+	for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
+	     data; \
+	     data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
+
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+					 enum mlx5_devcom_components id, int *i);
+
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \
+	for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
+	     data; \
+	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
+
+#endif
@@ -2071,8 +2071,9 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
	struct mlx5dr_action *action;
	u8 peer_vport;

-	peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi);
-	vport_dmn = peer_vport ? dmn->peer_dmn : dmn;
+	peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
+		(vhca_id != dmn->info.caps.gvmi);
+	vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
	if (!vport_dmn) {
		mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
		return NULL;
......
@@ -555,17 +555,18 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 }

 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
-			    struct mlx5dr_domain *peer_dmn)
+			    struct mlx5dr_domain *peer_dmn,
+			    u8 peer_idx)
 {
	mlx5dr_domain_lock(dmn);

-	if (dmn->peer_dmn)
-		refcount_dec(&dmn->peer_dmn->refcount);
+	if (dmn->peer_dmn[peer_idx])
+		refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);

-	dmn->peer_dmn = peer_dmn;
+	dmn->peer_dmn[peer_idx] = peer_dmn;

-	if (dmn->peer_dmn)
-		refcount_inc(&dmn->peer_dmn->refcount);
+	if (dmn->peer_dmn[peer_idx])
+		refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);

	mlx5dr_domain_unlock(dmn);
 }
@@ -1647,6 +1647,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
			      u8 *tag)
 {
	struct mlx5dr_match_misc *misc = &value->misc;
+	int id = misc->source_eswitch_owner_vhca_id;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
@@ -1657,11 +1658,11 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
-		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+		if (id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
-		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
-					   dmn->peer_dmn->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn;
+		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
+			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
+			vport_dmn = dmn->peer_dmn[id];
		else
			return -EINVAL;
......
@@ -1979,6 +1979,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    u8 *tag)
 {
	struct mlx5dr_match_misc *misc = &value->misc;
+	int id = misc->source_eswitch_owner_vhca_id;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
@@ -1988,11 +1989,11 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
-		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+		if (id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
-		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
-					   dmn->peer_dmn->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn;
+		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
+			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
+			vport_dmn = dmn->peer_dmn[id];
		else
			return -EINVAL;
......
@@ -935,7 +935,7 @@ struct mlx5dr_domain_info {
 };

 struct mlx5dr_domain {
-	struct mlx5dr_domain *peer_dmn;
+	struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
	struct mlx5_core_dev *mdev;
	u32 pdn;
	struct mlx5_uars_page *uar;
......
@@ -770,14 +770,15 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
 }

 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
-				struct mlx5_flow_root_namespace *peer_ns)
+				struct mlx5_flow_root_namespace *peer_ns,
+				u8 peer_idx)
 {
	struct mlx5dr_domain *peer_domain = NULL;

	if (peer_ns)
		peer_domain = peer_ns->fs_dr_domain.dr_domain;
	mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
-			       peer_domain);
+			       peer_domain, peer_idx);
	return 0;
 }
......
@@ -48,7 +48,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
 int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);

 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
-			    struct mlx5dr_domain *peer_dmn);
+			    struct mlx5dr_domain *peer_dmn,
+			    u8 peer_idx);

 struct mlx5dr_table *
 mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
......