Commit 88d162b4 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5: Devcom, Infrastructure changes

Update devcom infrastructure to be more generic, without
depending on max supported ports definition or a device guid,
and also more encapsulated so callers don't need to pass
the register devcom component id per event call.
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 02ceda65
...@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, ...@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
} }
static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
struct mlx5_devcom *devcom,
struct mlx5e_rep_sq *rep_sq, int i) struct mlx5e_rep_sq *rep_sq, int i)
{ {
struct mlx5_eswitch *peer_esw = NULL;
struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle *flow_rule;
int tmp; struct mlx5_devcom_comp_dev *tmp;
struct mlx5_eswitch *peer_esw;
mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS, mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
peer_esw, tmp) {
u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id); u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
struct mlx5e_rep_sq_peer *sq_peer; struct mlx5e_rep_sq_peer *sq_peer;
int err; int err;
...@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv; struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq; struct mlx5e_rep_sq *rep_sq;
struct mlx5_devcom *devcom;
bool devcom_locked = false; bool devcom_locked = false;
int err; int err;
int i; int i;
...@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS) if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0; return 0;
devcom = esw->dev->priv.devcom;
rpriv = mlx5e_rep_to_rep_priv(rep); rpriv = mlx5e_rep_to_rep_priv(rep);
if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) if (mlx5_devcom_comp_is_ready(esw->devcom) &&
mlx5_devcom_for_each_peer_begin(esw->devcom))
devcom_locked = true; devcom_locked = true;
for (i = 0; i < sqns_num; i++) { for (i = 0; i < sqns_num; i++) {
...@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
xa_init(&rep_sq->sq_peer); xa_init(&rep_sq->sq_peer);
if (devcom_locked) { if (devcom_locked) {
err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i); err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
if (err) { if (err) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
xa_destroy(&rep_sq->sq_peer); xa_destroy(&rep_sq->sq_peer);
...@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
} }
if (devcom_locked) if (devcom_locked)
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_for_each_peer_end(esw->devcom);
return 0; return 0;
...@@ -498,7 +495,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -498,7 +495,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
mlx5e_sqs2vport_stop(esw, rep); mlx5e_sqs2vport_stop(esw, rep);
if (devcom_locked) if (devcom_locked)
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_for_each_peer_end(esw->devcom);
return err; return err;
} }
......
...@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro ...@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
{ {
struct mlx5e_priv *out_priv, *route_priv; struct mlx5e_priv *out_priv, *route_priv;
struct mlx5_core_dev *route_mdev; struct mlx5_core_dev *route_mdev;
struct mlx5_devcom *devcom; struct mlx5_devcom_comp_dev *pos;
struct mlx5_eswitch *esw; struct mlx5_eswitch *esw;
u16 vhca_id; u16 vhca_id;
int err; int err;
int i;
out_priv = netdev_priv(out_dev); out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch; esw = out_priv->mdev->priv.eswitch;
...@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro ...@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
return err; return err;
rcu_read_lock(); rcu_read_lock();
devcom = out_priv->mdev->priv.devcom;
err = -ENODEV; err = -ENODEV;
mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS, mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
esw, i) {
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
if (!err) if (!err)
break; break;
...@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, ...@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow) struct mlx5e_tc_flow *flow)
{ {
if (mlx5e_is_eswitch_flow(flow)) { if (mlx5e_is_eswitch_flow(flow)) {
struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom; struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) { if (!mlx5_devcom_for_each_peer_begin(devcom)) {
mlx5e_tc_del_fdb_flow(priv, flow); mlx5e_tc_del_fdb_flow(priv, flow);
return; return;
} }
mlx5e_tc_del_fdb_peers_flow(flow); mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_for_each_peer_end(devcom);
mlx5e_tc_del_fdb_flow(priv, flow); mlx5e_tc_del_fdb_flow(priv, flow);
} else { } else {
mlx5e_tc_del_nic_flow(priv, flow); mlx5e_tc_del_nic_flow(priv, flow);
...@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) ...@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
flow_flag_test(flow, INGRESS); flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action & bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom, bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
MLX5_DEVCOM_ESW_OFFLOADS);
if (!esw_paired) if (!esw_paired)
return false; return false;
...@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv, ...@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev, struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow) struct mlx5e_tc_flow **__flow)
{ {
struct mlx5_devcom *devcom = priv->mdev->priv.devcom; struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep; struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev; struct mlx5_core_dev *in_mdev = priv->mdev;
struct mlx5_eswitch *peer_esw; struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow; struct mlx5e_tc_flow *flow;
int err; int err;
int i;
flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev); in_mdev);
...@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv, ...@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return 0; return 0;
} }
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) { if (!mlx5_devcom_for_each_peer_begin(devcom)) {
err = -ENODEV; err = -ENODEV;
goto clean_flow; goto clean_flow;
} }
mlx5_devcom_for_each_peer_entry(devcom, mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
MLX5_DEVCOM_ESW_OFFLOADS,
peer_esw, i) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw); err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
if (err) if (err)
goto peer_clean; goto peer_clean;
} }
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_for_each_peer_end(devcom);
*__flow = flow; *__flow = flow;
return 0; return 0;
peer_clean: peer_clean:
mlx5e_tc_del_fdb_peers_flow(flow); mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_for_each_peer_end(devcom);
clean_flow: clean_flow:
mlx5e_tc_del_fdb_flow(priv, flow); mlx5e_tc_del_fdb_flow(priv, flow);
return err; return err;
...@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv, ...@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags) struct flow_cls_offload *f, unsigned long flags)
{ {
struct mlx5_devcom *devcom = priv->mdev->priv.devcom; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow; struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter; struct mlx5_fc *counter;
...@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
/* Under multipath it's possible for one rule to be currently /* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded. * un-offloaded while the other rule is offloaded.
*/ */
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
goto out; goto out;
if (flow_flag_test(flow, DUP)) { if (flow_flag_test(flow, DUP)) {
...@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
} }
no_peer_counter: no_peer_counter:
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); if (esw)
mlx5_devcom_for_each_peer_end(esw->devcom);
out: out:
flow_stats_update(&f->stats, bytes, packets, 0, lastuse, flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED); FLOW_ACTION_HW_STATS_DELAYED);
......
...@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, ...@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
struct mlx5_esw_bridge *bridge) struct mlx5_esw_bridge *bridge)
{ {
struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom; struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL; struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle; static struct mlx5_flow_handle *handle;
int i;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
mlx5_devcom_for_each_peer_entry(devcom, mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
MLX5_DEVCOM_ESW_OFFLOADS,
tmp, i) {
if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) { if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
peer_esw = tmp; peer_esw = tmp;
break; break;
} }
} }
if (!peer_esw) { if (!peer_esw) {
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); handle = ERR_PTR(-ENODEV);
return ERR_PTR(-ENODEV); goto out;
} }
handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
bridge, peer_esw); bridge, peer_esw);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
mlx5_devcom_for_each_peer_end(devcom);
return handle; return handle;
} }
...@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow ...@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
mlx5_fc_id(counter), bridge); mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) { if (IS_ERR(handle)) {
err = PTR_ERR(handle); err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n", esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
vport_num, err); vport_num, err, peer);
goto err_ingress_flow_create; goto err_ingress_flow_create;
} }
entry->ingress_handle = handle; entry->ingress_handle = handle;
......
...@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port) ...@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
static struct mlx5_flow_handle * static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port) mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{ {
struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom; struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL; struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle; static struct mlx5_flow_handle *handle;
int i;
if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
mlx5_devcom_for_each_peer_entry(devcom, mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
MLX5_DEVCOM_ESW_OFFLOADS,
tmp, i) {
if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) { if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
peer_esw = tmp; peer_esw = tmp;
break; break;
} }
} }
if (!peer_esw) { if (!peer_esw) {
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); handle = ERR_PTR(-ENODEV);
return ERR_PTR(-ENODEV); goto out;
} }
handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw); handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out:
mlx5_devcom_for_each_peer_end(devcom);
return handle; return handle;
} }
......
...@@ -354,6 +354,7 @@ struct mlx5_eswitch { ...@@ -354,6 +354,7 @@ struct mlx5_eswitch {
} params; } params;
struct blocking_notifier_head n_head; struct blocking_notifier_head n_head;
struct xarray paired; struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
}; };
void esw_offloads_disable(struct mlx5_eswitch *esw); void esw_offloads_disable(struct mlx5_eswitch *esw);
...@@ -383,6 +384,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); ...@@ -383,6 +384,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw); void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw); void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac); u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
...@@ -818,6 +820,7 @@ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool cle ...@@ -818,6 +820,7 @@ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool cle
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {} static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {} static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
......
...@@ -2811,7 +2811,6 @@ static int mlx5_esw_offloads_devcom_event(int event, ...@@ -2811,7 +2811,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
void *event_data) void *event_data)
{ {
struct mlx5_eswitch *esw = my_data; struct mlx5_eswitch *esw = my_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
struct mlx5_eswitch *peer_esw = event_data; struct mlx5_eswitch *peer_esw = event_data;
u16 esw_i, peer_esw_i; u16 esw_i, peer_esw_i;
bool esw_paired; bool esw_paired;
...@@ -2833,6 +2832,7 @@ static int mlx5_esw_offloads_devcom_event(int event, ...@@ -2833,6 +2832,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err) if (err)
goto err_out; goto err_out;
err = mlx5_esw_offloads_pair(esw, peer_esw); err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err) if (err)
goto err_peer; goto err_peer;
...@@ -2851,7 +2851,7 @@ static int mlx5_esw_offloads_devcom_event(int event, ...@@ -2851,7 +2851,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
esw->num_peers++; esw->num_peers++;
peer_esw->num_peers++; peer_esw->num_peers++;
mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); mlx5_devcom_comp_set_ready(esw->devcom, true);
break; break;
case ESW_OFFLOADS_DEVCOM_UNPAIR: case ESW_OFFLOADS_DEVCOM_UNPAIR:
...@@ -2861,7 +2861,7 @@ static int mlx5_esw_offloads_devcom_event(int event, ...@@ -2861,7 +2861,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
peer_esw->num_peers--; peer_esw->num_peers--;
esw->num_peers--; esw->num_peers--;
if (!esw->num_peers && !peer_esw->num_peers) if (!esw->num_peers && !peer_esw->num_peers)
mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); mlx5_devcom_comp_set_ready(esw->devcom, false);
xa_erase(&peer_esw->paired, esw_i); xa_erase(&peer_esw->paired, esw_i);
xa_erase(&esw->paired, peer_esw_i); xa_erase(&esw->paired, peer_esw_i);
mlx5_esw_offloads_unpair(peer_esw, esw); mlx5_esw_offloads_unpair(peer_esw, esw);
...@@ -2888,7 +2888,7 @@ static int mlx5_esw_offloads_devcom_event(int event, ...@@ -2888,7 +2888,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{ {
struct mlx5_devcom *devcom = esw->dev->priv.devcom; u64 guid;
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < MLX5_MAX_PORTS; i++)
...@@ -2902,34 +2902,41 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) ...@@ -2902,34 +2902,41 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
return; return;
xa_init(&esw->paired); xa_init(&esw->paired);
mlx5_devcom_register_component(devcom, guid = mlx5_query_nic_system_image_guid(esw->dev);
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
esw);
esw->num_peers = 0; esw->num_peers = 0;
mlx5_devcom_send_event(devcom, esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS, MLX5_DEVCOM_ESW_OFFLOADS,
guid,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR_OR_NULL(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_PAIR, ESW_OFFLOADS_DEVCOM_PAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw); ESW_OFFLOADS_DEVCOM_UNPAIR,
esw);
} }
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{ {
struct mlx5_devcom *devcom = esw->dev->priv.devcom; if (IS_ERR_OR_NULL(esw->devcom))
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return; return;
if (!mlx5_lag_is_supported(esw->dev)) mlx5_devcom_send_event(esw->devcom,
return; ESW_OFFLOADS_DEVCOM_UNPAIR,
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
ESW_OFFLOADS_DEVCOM_UNPAIR, ESW_OFFLOADS_DEVCOM_UNPAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw); esw);
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_unregister_component(esw->devcom);
xa_destroy(&esw->paired); xa_destroy(&esw->paired);
esw->devcom = NULL;
}
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
{
return mlx5_devcom_comp_is_ready(esw->devcom);
} }
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
......
...@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) ...@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
dev = ldev->pf[MLX5_LAG_P1].dev; dev = ldev->pf[MLX5_LAG_P1].dev;
if (is_mdev_switchdev_mode(dev) && if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) && mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
MLX5_CAP_ESW(dev, esw_shared_ingress_acl) && MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1) mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true; return true;
......
...@@ -2,214 +2,273 @@ ...@@ -2,214 +2,273 @@
/* Copyright (c) 2018 Mellanox Technologies */ /* Copyright (c) 2018 Mellanox Technologies */
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h" #include "lib/devcom.h"
#include "mlx5_core.h" #include "mlx5_core.h"
static LIST_HEAD(devcom_list); static LIST_HEAD(devcom_dev_list);
static LIST_HEAD(devcom_comp_list);
/* protect device list */
static DEFINE_MUTEX(dev_list_lock);
/* protect component list */
static DEFINE_MUTEX(comp_list_lock);
#define devcom_for_each_component(priv, comp, iter) \ #define devcom_for_each_component(iter) \
for (iter = 0; \ list_for_each_entry(iter, &devcom_comp_list, comp_list)
comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
iter++)
struct mlx5_devcom_component { struct mlx5_devcom_dev {
struct { struct list_head list;
void __rcu *data; struct mlx5_core_dev *dev;
} device[MLX5_DEVCOM_PORTS_SUPPORTED]; struct kref ref;
};
struct mlx5_devcom_comp {
struct list_head comp_list;
enum mlx5_devcom_component id;
u64 key;
struct list_head comp_dev_list_head;
mlx5_devcom_event_handler_t handler; mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem; struct kref ref;
bool ready; bool ready;
struct rw_semaphore sem;
}; };
struct mlx5_devcom_list { struct mlx5_devcom_comp_dev {
struct list_head list; struct list_head list;
struct mlx5_devcom_comp *comp;
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS]; struct mlx5_devcom_dev *devc;
struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED]; void __rcu *data;
}; };
struct mlx5_devcom { static bool devcom_dev_exists(struct mlx5_core_dev *dev)
struct mlx5_devcom_list *priv;
int idx;
};
static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_dev *iter;
struct mlx5_devcom_list *priv;
int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
devcom_for_each_component(priv, comp, i) list_for_each_entry(iter, &devcom_dev_list, list)
init_rwsem(&comp->sem); if (iter->dev == dev)
return true;
return priv; return false;
} }
static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv, static struct mlx5_devcom_dev *
u8 idx) mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
{ {
struct mlx5_devcom *devcom; struct mlx5_devcom_dev *devc;
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL); devc = kzalloc(sizeof(*devc), GFP_KERNEL);
if (!devcom) if (!devc)
return NULL; return NULL;
devcom->priv = priv; devc->dev = dev;
devcom->idx = idx; kref_init(&devc->ref);
return devcom; return devc;
} }
/* Must be called with intf_mutex held */ struct mlx5_devcom_dev *
struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{ {
struct mlx5_devcom_list *priv = NULL, *iter; struct mlx5_devcom_dev *devc;
struct mlx5_devcom *devcom = NULL;
bool new_priv = false;
u64 sguid0, sguid1;
int idx, i;
if (!mlx5_core_is_pf(dev))
return NULL;
if (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_DEVCOM_PORTS_SUPPORTED)
return NULL;
mlx5_dev_list_lock();
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
/* There is at least one device in iter */
struct mlx5_core_dev *tmp_dev;
idx = -1;
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
if (iter->devs[i])
tmp_dev = iter->devs[i];
else
idx = i;
}
if (idx == -1)
continue;
sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
if (sguid0 != sguid1)
continue;
priv = iter;
break;
}
if (!priv) { mutex_lock(&dev_list_lock);
priv = mlx5_devcom_list_alloc();
if (!priv) {
devcom = ERR_PTR(-ENOMEM);
goto out;
}
idx = 0; if (devcom_dev_exists(dev)) {
new_priv = true; devc = ERR_PTR(-EEXIST);
goto out;
} }
priv->devs[idx] = dev; devc = mlx5_devcom_dev_alloc(dev);
devcom = mlx5_devcom_alloc(priv, idx); if (!devc) {
if (!devcom) { devc = ERR_PTR(-ENOMEM);
if (new_priv)
kfree(priv);
devcom = ERR_PTR(-ENOMEM);
goto out; goto out;
} }
if (new_priv) list_add_tail(&devc->list, &devcom_dev_list);
list_add(&priv->list, &devcom_list);
out: out:
mlx5_dev_list_unlock(); mutex_unlock(&dev_list_lock);
return devcom; return devc;
} }
/* Must be called with intf_mutex held */ static void
void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) mlx5_devcom_dev_release(struct kref *ref)
{ {
struct mlx5_devcom_list *priv; struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);
int i;
if (IS_ERR_OR_NULL(devcom)) mutex_lock(&dev_list_lock);
return; list_del(&devc->list);
mutex_unlock(&dev_list_lock);
kfree(devc);
}
mlx5_dev_list_lock(); void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
priv = devcom->priv; {
priv->devs[devcom->idx] = NULL; if (!IS_ERR_OR_NULL(devc))
kref_put(&devc->ref, mlx5_devcom_dev_release);
}
kfree(devcom); static struct mlx5_devcom_comp *
mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (priv->devs[i]) if (!comp)
break; return ERR_PTR(-ENOMEM);
if (i != MLX5_DEVCOM_PORTS_SUPPORTED) comp->id = id;
goto out; comp->key = key;
comp->handler = handler;
init_rwsem(&comp->sem);
kref_init(&comp->ref);
INIT_LIST_HEAD(&comp->comp_dev_list_head);
list_del(&priv->list); return comp;
kfree(priv);
out:
mlx5_dev_list_unlock();
} }
void mlx5_devcom_register_component(struct mlx5_devcom *devcom, static void
enum mlx5_devcom_components id, mlx5_devcom_comp_release(struct kref *ref)
mlx5_devcom_event_handler_t handler,
void *data)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);
if (IS_ERR_OR_NULL(devcom)) mutex_lock(&comp_list_lock);
return; list_del(&comp->comp_list);
mutex_unlock(&comp_list_lock);
kfree(comp);
}
static struct mlx5_devcom_comp_dev *
devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp,
void *data)
{
struct mlx5_devcom_comp_dev *devcom;
WARN_ON(!data); devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
return ERR_PTR(-ENOMEM);
kref_get(&devc->ref);
devcom->devc = devc;
devcom->comp = comp;
rcu_assign_pointer(devcom->data, data);
comp = &devcom->priv->components[id];
down_write(&comp->sem); down_write(&comp->sem);
comp->handler = handler; list_add_tail(&devcom->list, &comp->comp_dev_list_head);
rcu_assign_pointer(comp->device[devcom->idx].data, data);
up_write(&comp->sem); up_write(&comp->sem);
return devcom;
} }
void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, static void
enum mlx5_devcom_components id) devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp = devcom->comp;
if (IS_ERR_OR_NULL(devcom))
return;
comp = &devcom->priv->components[id];
down_write(&comp->sem); down_write(&comp->sem);
RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL); list_del(&devcom->list);
up_write(&comp->sem); up_write(&comp->sem);
synchronize_rcu();
kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
kfree(devcom);
kref_put(&comp->ref, mlx5_devcom_comp_release);
} }
int mlx5_devcom_send_event(struct mlx5_devcom *devcom, static bool
enum mlx5_devcom_components id, devcom_component_equal(struct mlx5_devcom_comp *devcom,
enum mlx5_devcom_component id,
u64 key)
{
return devcom->id == id && devcom->key == key;
}
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
u64 key,
mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
devcom_for_each_component(comp) {
if (devcom_component_equal(comp, id, key)) {
if (handler == comp->handler) {
kref_get(&comp->ref);
return comp;
}
mlx5_core_err(devc->dev,
"Cannot register existing devcom component with different handler\n");
return ERR_PTR(-EINVAL);
}
}
return NULL;
}
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
u64 key,
mlx5_devcom_event_handler_t handler,
void *data)
{
struct mlx5_devcom_comp_dev *devcom;
struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devc))
return NULL;
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
if (IS_ERR(comp)) {
devcom = ERR_PTR(-EINVAL);
goto out_unlock;
}
if (!comp) {
comp = mlx5_devcom_comp_alloc(id, key, handler);
if (IS_ERR(comp)) {
devcom = ERR_CAST(comp);
goto out_unlock;
}
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
if (IS_ERR(devcom))
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
out_unlock:
mutex_unlock(&comp_list_lock);
return devcom;
}
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
if (!IS_ERR_OR_NULL(devcom))
devcom_free_comp_dev(devcom);
}
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event, int event, int rollback_event,
void *event_data) void *event_data)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp = devcom->comp;
int err = -ENODEV, i; struct mlx5_devcom_comp_dev *pos;
int err = 0;
void *data;
if (IS_ERR_OR_NULL(devcom)) if (IS_ERR_OR_NULL(devcom))
return err; return -ENODEV;
comp = &devcom->priv->components[id];
down_write(&comp->sem); down_write(&comp->sem);
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
void *data = rcu_dereference_protected(comp->device[i].data, data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
lockdep_is_held(&comp->sem));
if (i != devcom->idx && data) { if (pos != devcom && data) {
err = comp->handler(event, data, event_data); err = comp->handler(event, data, event_data);
if (err) if (err)
goto rollback; goto rollback;
...@@ -220,48 +279,43 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, ...@@ -220,48 +279,43 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
return 0; return 0;
rollback: rollback:
while (i--) { if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
void *data = rcu_dereference_protected(comp->device[i].data, goto out;
lockdep_is_held(&comp->sem)); pos = list_prev_entry(pos, list);
list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
if (i != devcom->idx && data) if (pos != devcom && data)
comp->handler(rollback_event, data, event_data); comp->handler(rollback_event, data, event_data);
} }
out:
up_write(&comp->sem); up_write(&comp->sem);
return err; return err;
} }
void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom, void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
enum mlx5_devcom_components id,
bool ready)
{ {
struct mlx5_devcom_component *comp; WARN_ON(!rwsem_is_locked(&devcom->comp->sem));
comp = &devcom->priv->components[id];
WARN_ON(!rwsem_is_locked(&comp->sem));
WRITE_ONCE(comp->ready, ready); WRITE_ONCE(devcom->comp->ready, ready);
} }
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom, bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
enum mlx5_devcom_components id)
{ {
if (IS_ERR_OR_NULL(devcom)) if (IS_ERR_OR_NULL(devcom))
return false; return false;
return READ_ONCE(devcom->priv->components[id].ready); return READ_ONCE(devcom->comp->ready);
} }
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
enum mlx5_devcom_components id)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devcom)) if (IS_ERR_OR_NULL(devcom))
return false; return false;
comp = &devcom->priv->components[id]; comp = devcom->comp;
down_read(&comp->sem); down_read(&comp->sem);
if (!READ_ONCE(comp->ready)) { if (!READ_ONCE(comp->ready)) {
up_read(&comp->sem); up_read(&comp->sem);
...@@ -271,74 +325,60 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, ...@@ -271,74 +325,60 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
return true; return true;
} }
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom, void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
enum mlx5_devcom_components id)
{ {
struct mlx5_devcom_component *comp = &devcom->priv->components[id]; up_read(&devcom->comp->sem);
up_read(&comp->sem);
} }
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom, void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
enum mlx5_devcom_components id, struct mlx5_devcom_comp_dev **pos)
int *i)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp = devcom->comp;
void *ret; struct mlx5_devcom_comp_dev *tmp;
int idx; void *data;
comp = &devcom->priv->components[id]; tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
if (*i == MLX5_DEVCOM_PORTS_SUPPORTED) list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
return NULL; if (tmp != devcom) {
for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) { data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
if (idx != devcom->idx) { if (data)
ret = rcu_dereference_protected(comp->device[idx].data,
lockdep_is_held(&comp->sem));
if (ret)
break; break;
} }
} }
if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) { if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
*i = idx;
return NULL; return NULL;
}
*i = idx + 1;
return ret; *pos = tmp;
return data;
} }
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom, void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
enum mlx5_devcom_components id, struct mlx5_devcom_comp_dev **pos)
int *i)
{ {
struct mlx5_devcom_component *comp; struct mlx5_devcom_comp *comp = devcom->comp;
void *ret; struct mlx5_devcom_comp_dev *tmp;
int idx; void *data;
comp = &devcom->priv->components[id]; tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
if (*i == MLX5_DEVCOM_PORTS_SUPPORTED) list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
return NULL; if (tmp != devcom) {
for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
if (idx != devcom->idx) {
/* This can change concurrently, however 'data' pointer will remain /* This can change concurrently, however 'data' pointer will remain
* valid for the duration of RCU read section. * valid for the duration of RCU read section.
*/ */
if (!READ_ONCE(comp->ready)) if (!READ_ONCE(comp->ready))
return NULL; return NULL;
ret = rcu_dereference(comp->device[idx].data); data = rcu_dereference(tmp->data);
if (ret) if (data)
break; break;
} }
} }
if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) { if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
*i = idx;
return NULL; return NULL;
}
*i = idx + 1;
return ret; *pos = tmp;
return data;
} }
...@@ -6,11 +6,8 @@ ...@@ -6,11 +6,8 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#define MLX5_DEVCOM_PORTS_SUPPORTED 4 enum mlx5_devcom_component {
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS, MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_NUM_COMPONENTS, MLX5_DEVCOM_NUM_COMPONENTS,
}; };
...@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event, ...@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event,
void *my_data, void *my_data,
void *event_data); void *event_data);
struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev); struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom); void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
void mlx5_devcom_register_component(struct mlx5_devcom *devcom, struct mlx5_devcom_comp_dev *
enum mlx5_devcom_components id, mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
mlx5_devcom_event_handler_t handler, enum mlx5_devcom_component id,
void *data); u64 key,
void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, mlx5_devcom_event_handler_t handler,
enum mlx5_devcom_components id); void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_send_event(struct mlx5_devcom *devcom, int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
enum mlx5_devcom_components id,
int event, int rollback_event, int event, int rollback_event,
void *event_data); void *event_data);
void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom, void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
enum mlx5_devcom_components id, bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom, bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom);
enum mlx5_devcom_components id); void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom);
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, struct mlx5_devcom_comp_dev **pos);
enum mlx5_devcom_components id);
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom, #define mlx5_devcom_for_each_peer_entry(devcom, data, pos) \
enum mlx5_devcom_components id); for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom, data; \
enum mlx5_devcom_components id, int *i); data = mlx5_devcom_get_next_peer_data(devcom, &pos))
#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \ struct mlx5_devcom_comp_dev **pos);
data; \
data = mlx5_devcom_get_next_peer_data(devcom, id, &i)) #define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos) \
for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom, data; \
enum mlx5_devcom_components id, int *i); data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \ #endif /* __LIB_MLX5_DEVCOM_H__ */
for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
data; \
data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
#endif
...@@ -951,10 +951,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) ...@@ -951,10 +951,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
{ {
int err; int err;
dev->priv.devcom = mlx5_devcom_register_device(dev); dev->priv.devc = mlx5_devcom_register_device(dev);
if (IS_ERR(dev->priv.devcom)) if (IS_ERR(dev->priv.devc))
mlx5_core_err(dev, "failed to register with devcom (0x%p)\n", mlx5_core_warn(dev, "failed to register devcom device %ld\n",
dev->priv.devcom); PTR_ERR(dev->priv.devc));
err = mlx5_query_board_id(dev); err = mlx5_query_board_id(dev);
if (err) { if (err) {
...@@ -1089,7 +1089,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) ...@@ -1089,7 +1089,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
err_irq_cleanup: err_irq_cleanup:
mlx5_irq_table_cleanup(dev); mlx5_irq_table_cleanup(dev);
err_devcom: err_devcom:
mlx5_devcom_unregister_device(dev->priv.devcom); mlx5_devcom_unregister_device(dev->priv.devc);
return err; return err;
} }
...@@ -1118,7 +1118,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) ...@@ -1118,7 +1118,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev); mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev); mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev); mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devcom); mlx5_devcom_unregister_device(dev->priv.devc);
} }
static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout) static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
......
...@@ -501,7 +501,7 @@ struct mlx5_events; ...@@ -501,7 +501,7 @@ struct mlx5_events;
struct mlx5_mpfs; struct mlx5_mpfs;
struct mlx5_eswitch; struct mlx5_eswitch;
struct mlx5_lag; struct mlx5_lag;
struct mlx5_devcom; struct mlx5_devcom_dev;
struct mlx5_fw_reset; struct mlx5_fw_reset;
struct mlx5_eq_table; struct mlx5_eq_table;
struct mlx5_irq_table; struct mlx5_irq_table;
...@@ -618,7 +618,7 @@ struct mlx5_priv { ...@@ -618,7 +618,7 @@ struct mlx5_priv {
struct mlx5_core_sriov sriov; struct mlx5_core_sriov sriov;
struct mlx5_lag *lag; struct mlx5_lag *lag;
u32 flags; u32 flags;
struct mlx5_devcom *devcom; struct mlx5_devcom_dev *devc;
struct mlx5_fw_reset *fw_reset; struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce; struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats; struct mlx5_fc_stats fc_stats;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment