Commit 88d162b4 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5: Devcom, Infrastructure changes

Update the devcom infrastructure to be more generic, without
depending on the max supported ports definition or a device guid,
and more encapsulated, so callers don't need to pass the registered
devcom component id with every event call.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 02ceda65
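In short, a devcom component is now registered once against the device and
returns a handle that all later calls use, instead of naming the component on
every call. A condensed before/after sketch of the calling convention, pieced
together from the hunks below (error handling trimmed):

    /* Before: global devcom object plus an explicit component id per call. */
    mlx5_devcom_register_component(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS,
                                   mlx5_esw_offloads_devcom_event, esw);
    mlx5_devcom_send_event(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS,
                           ESW_OFFLOADS_DEVCOM_PAIR,
                           ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

    /* After: registration takes the system image guid as a key and returns a
     * component handle; every later call passes only that handle.
     */
    esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
                                                 MLX5_DEVCOM_ESW_OFFLOADS,
                                                 mlx5_query_nic_system_image_guid(esw->dev),
                                                 mlx5_esw_offloads_devcom_event, esw);
    if (!IS_ERR_OR_NULL(esw->devcom))
        mlx5_devcom_send_event(esw->devcom,
                               ESW_OFFLOADS_DEVCOM_PAIR,
                               ESW_OFFLOADS_DEVCOM_UNPAIR, esw);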
@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 }
 
 static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
-					   struct mlx5_devcom *devcom,
 					   struct mlx5e_rep_sq *rep_sq, int i)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
-	int tmp;
+	struct mlx5_devcom_comp_dev *tmp;
+	struct mlx5_eswitch *peer_esw;
 
-	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, tmp) {
+	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
 		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 		struct mlx5e_rep_sq_peer *sq_peer;
 		int err;
@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
-	struct mlx5_devcom *devcom;
 	bool devcom_locked = false;
 	int err;
 	int i;
@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
-	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
-	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+
+	if (mlx5_devcom_comp_is_ready(esw->devcom) &&
+	    mlx5_devcom_for_each_peer_begin(esw->devcom))
 		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 		xa_init(&rep_sq->sq_peer);
 		if (devcom_locked) {
-			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
 			if (err) {
 				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
 				xa_destroy(&rep_sq->sq_peer);
@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	}
 
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 
 	return 0;
@@ -498,7 +495,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	mlx5e_sqs2vport_stop(esw, rep);
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 	return err;
 }
...
@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 {
 	struct mlx5e_priv *out_priv, *route_priv;
 	struct mlx5_core_dev *route_mdev;
-	struct mlx5_devcom *devcom;
+	struct mlx5_devcom_comp_dev *pos;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
-	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 		return err;
 
 	rcu_read_lock();
-	devcom = out_priv->mdev->priv.devcom;
 	err = -ENODEV;
-	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					    esw, i) {
+	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
 		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 		if (!err)
 			break;
@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
-		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
 
-		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
 		      flow_flag_test(flow, INGRESS);
 	bool act_is_encap = !!(attr->action &
 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
-	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
-						    MLX5_DEVCOM_ESW_OFFLOADS);
+	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
 
 	if (!esw_paired)
 		return false;
@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		   struct net_device *filter_dev,
 		   struct mlx5e_tc_flow **__flow)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
 	struct mlx5_core_dev *in_mdev = priv->mdev;
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
-	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
 		if (err)
 			goto peer_clean;
 	}
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 
 	*__flow = flow;
 
 	return 0;
 
 peer_clean:
 	mlx5e_tc_del_fdb_peers_flow(flow);
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct flow_cls_offload *f, unsigned long flags)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (esw)
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
...
@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
 					 struct mlx5_esw_bridge *bridge)
 {
-	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
 	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	int i;
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (!mlx5_devcom_for_each_peer_begin(devcom))
 		return ERR_PTR(-ENODEV);
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					tmp, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
 		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
 			peer_esw = tmp;
 			break;
 		}
 	}
+
 	if (!peer_esw) {
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		return ERR_PTR(-ENODEV);
+		handle = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
 							      bridge, peer_esw);
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+out:
+	mlx5_devcom_for_each_peer_end(devcom);
 	return handle;
 }
@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
 							mlx5_fc_id(counter), bridge);
 	if (IS_ERR(handle)) {
 		err = PTR_ERR(handle);
-		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
-			 vport_num, err);
+		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
+			 vport_num, err, peer);
 		goto err_ingress_flow_create;
 	}
 	entry->ingress_handle = handle;
...
@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
 static struct mlx5_flow_handle *
 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
 {
-	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
 	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	int i;
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (!mlx5_devcom_for_each_peer_begin(devcom))
 		return ERR_PTR(-ENODEV);
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					tmp, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
 		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
 			peer_esw = tmp;
 			break;
 		}
 	}
+
 	if (!peer_esw) {
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		return ERR_PTR(-ENODEV);
+		handle = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+out:
+	mlx5_devcom_for_each_peer_end(devcom);
 	return handle;
 }
...
@@ -354,6 +354,7 @@ struct mlx5_eswitch {
 	} params;
 	struct blocking_notifier_head n_head;
 	struct xarray paired;
+	struct mlx5_devcom_comp_dev *devcom;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -383,6 +384,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -818,6 +820,7 @@ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool cle
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
 static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
 static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
...
@@ -2811,7 +2811,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
 					  void *event_data)
 {
 	struct mlx5_eswitch *esw = my_data;
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 	struct mlx5_eswitch *peer_esw = event_data;
 	u16 esw_i, peer_esw_i;
 	bool esw_paired;
@@ -2833,6 +2832,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
 		if (err)
 			goto err_out;
+
 		err = mlx5_esw_offloads_pair(esw, peer_esw);
 		if (err)
 			goto err_peer;
@@ -2851,7 +2851,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		esw->num_peers++;
 		peer_esw->num_peers++;
-		mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+		mlx5_devcom_comp_set_ready(esw->devcom, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
@@ -2861,7 +2861,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		peer_esw->num_peers--;
 		esw->num_peers--;
 		if (!esw->num_peers && !peer_esw->num_peers)
-			mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+			mlx5_devcom_comp_set_ready(esw->devcom, false);
 		xa_erase(&peer_esw->paired, esw_i);
 		xa_erase(&esw->paired, peer_esw_i);
 		mlx5_esw_offloads_unpair(peer_esw, esw);
@@ -2888,7 +2888,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	u64 guid;
 	int i;
 
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -2902,34 +2902,41 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 		return;
 
 	xa_init(&esw->paired);
-	mlx5_devcom_register_component(devcom,
-				       MLX5_DEVCOM_ESW_OFFLOADS,
-				       mlx5_esw_offloads_devcom_event,
-				       esw);
-
+	guid = mlx5_query_nic_system_image_guid(esw->dev);
 	esw->num_peers = 0;
-	mlx5_devcom_send_event(devcom,
-			       MLX5_DEVCOM_ESW_OFFLOADS,
+	esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
+						     MLX5_DEVCOM_ESW_OFFLOADS,
+						     guid,
+						     mlx5_esw_offloads_devcom_event,
+						     esw);
+	if (IS_ERR_OR_NULL(esw->devcom))
+		return;
+
+	mlx5_devcom_send_event(esw->devcom,
 			       ESW_OFFLOADS_DEVCOM_PAIR,
-			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+			       ESW_OFFLOADS_DEVCOM_UNPAIR,
+			       esw);
 }
 
 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
-	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
-
-	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+	if (IS_ERR_OR_NULL(esw->devcom))
 		return;
 
-	if (!mlx5_lag_is_supported(esw->dev))
-		return;
-
-	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+	mlx5_devcom_send_event(esw->devcom,
+			       ESW_OFFLOADS_DEVCOM_UNPAIR,
 			       ESW_OFFLOADS_DEVCOM_UNPAIR,
-			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
-
-	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+			       esw);
+
+	mlx5_devcom_unregister_component(esw->devcom);
 	xa_destroy(&esw->paired);
+	esw->devcom = NULL;
+}
+
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
+{
+	return mlx5_devcom_comp_is_ready(esw->devcom);
 }
 
 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
...
@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
 	dev = ldev->pf[MLX5_LAG_P1].dev;
 	if (is_mdev_switchdev_mode(dev) &&
 	    mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
-	    mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
 	    MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
 	    mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
 		return true;
...
@@ -6,11 +6,8 @@
 
 #include <linux/mlx5/driver.h>
 
-#define MLX5_DEVCOM_PORTS_SUPPORTED 4
-
-enum mlx5_devcom_components {
+enum mlx5_devcom_component {
 	MLX5_DEVCOM_ESW_OFFLOADS,
-
 	MLX5_DEVCOM_NUM_COMPONENTS,
 };
 
@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event,
 					   void *my_data,
 					   void *event_data);
 
-struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
-void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
+struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
+void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
 
-void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
-				    enum mlx5_devcom_components id,
-				    mlx5_devcom_event_handler_t handler,
-				    void *data);
-void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
-				      enum mlx5_devcom_components id);
+struct mlx5_devcom_comp_dev *
+mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
			       enum mlx5_devcom_component id,
			       u64 key,
			       mlx5_devcom_event_handler_t handler,
			       void *data);
+void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
 
-int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
-			   enum mlx5_devcom_components id,
+int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
 			   int event, int rollback_event,
 			   void *event_data);
 
-void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id,
-				bool ready);
-bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
-			       enum mlx5_devcom_components id);
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
 
-bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id);
-void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id);
-void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
-				     enum mlx5_devcom_components id, int *i);
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
+				     struct mlx5_devcom_comp_dev **pos);
 
-#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i)		\
-	for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
-	     data;							\
-	     data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
+#define mlx5_devcom_for_each_peer_entry(devcom, data, pos)		\
+	for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \
+	     data;							\
+	     data = mlx5_devcom_get_next_peer_data(devcom, &pos))
 
-void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
-					 enum mlx5_devcom_components id, int *i);
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
+					 struct mlx5_devcom_comp_dev **pos);
 
-#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i)	\
-	for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
-	     data;							\
-	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos)	\
+	for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \
+	     data;							\
+	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
 
-#endif
+#endif /* __LIB_MLX5_DEVCOM_H__ */
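For reference, the peer-iteration macros now thread an opaque cursor (pos)
through the component handle instead of an integer index. A minimal caller
sketch using the new API (do_something_with() is illustrative, not part of
this patch):

    struct mlx5_devcom_comp_dev *pos;
    struct mlx5_eswitch *peer_esw;

    if (mlx5_devcom_for_each_peer_begin(esw->devcom)) {
        mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, pos)
            do_something_with(peer_esw); /* illustrative peer handling */
        mlx5_devcom_for_each_peer_end(esw->devcom);
    }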
@@ -951,10 +951,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 {
 	int err;
 
-	dev->priv.devcom = mlx5_devcom_register_device(dev);
-	if (IS_ERR(dev->priv.devcom))
-		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
-			      dev->priv.devcom);
+	dev->priv.devc = mlx5_devcom_register_device(dev);
+	if (IS_ERR(dev->priv.devc))
+		mlx5_core_warn(dev, "failed to register devcom device %ld\n",
+			       PTR_ERR(dev->priv.devc));
 
 	err = mlx5_query_board_id(dev);
 	if (err) {
@@ -1089,7 +1089,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 err_irq_cleanup:
 	mlx5_irq_table_cleanup(dev);
 err_devcom:
-	mlx5_devcom_unregister_device(dev->priv.devcom);
+	mlx5_devcom_unregister_device(dev->priv.devc);
 
 	return err;
 }
@@ -1118,7 +1118,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
 	mlx5_irq_table_cleanup(dev);
-	mlx5_devcom_unregister_device(dev->priv.devcom);
+	mlx5_devcom_unregister_device(dev->priv.devc);
 }
 
 static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
...
@@ -501,7 +501,7 @@ struct mlx5_events;
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
-struct mlx5_devcom;
+struct mlx5_devcom_dev;
 struct mlx5_fw_reset;
 struct mlx5_eq_table;
 struct mlx5_irq_table;
@@ -618,7 +618,7 @@ struct mlx5_priv {
 	struct mlx5_core_sriov	sriov;
 	struct mlx5_lag		*lag;
 	u32			flags;
-	struct mlx5_devcom	*devcom;
+	struct mlx5_devcom_dev	*devc;
 	struct mlx5_fw_reset	*fw_reset;
 	struct mlx5_core_roce	roce;
 	struct mlx5_fc_stats	fc_stats;
...