Commit ba46c96d authored by David S. Miller

Merge tag 'mlx5-fixes-2023-05-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2023-05-22

This series provides bug fixes for the mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 04910d8c 1da438c0
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 			   u32 syndrome, int err)
 {
+	const char *namep = mlx5_command_str(opcode);
 	struct mlx5_cmd_stats *stats;
 
-	if (!err)
+	if (!err || !(strcmp(namep, "unknown command opcode")))
 		return;
 
 	stats = &dev->cmd.stats[opcode];
...
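The guard above relies on !strcmp(a, b) being true exactly when the two strings are equal: cmd_status_log() now also returns early when mlx5_command_str() does not recognize the opcode, so no stats entry is updated for an unknown command. A stand-alone illustration of the idiom (plain C, not patch text):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* mlx5_command_str() returns this literal for opcodes it has no name for */
		const char *namep = "unknown command opcode";

		if (!strcmp(namep, "unknown command opcode"))
			puts("unrecognized opcode: skip stats logging");
		return 0;
	}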
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
 	/* ensure cq space is freed before enabling more cqes */
 	wmb();
 
+	mlx5e_txqsq_wake(&ptpsq->txqsq);
+
 	return work_done == budget;
 }
...
@@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 
 	list_for_each_entry(flow, encap_flows, tmp_list) {
-		struct mlx5_flow_attr *attr = flow->attr;
 		struct mlx5_esw_flow_attr *esw_attr;
+		struct mlx5_flow_attr *attr;
 
 		if (!mlx5e_is_offloaded_flow(flow))
 			continue;
+
+		attr = mlx5e_tc_get_encap_attr(flow);
 		esw_attr = attr->esw_attr;
 		if (flow_flag_test(flow, SLOW))
...
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
 	return pi;
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
 static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
 	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
...
@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
 	struct mlx5e_priv *out_priv, *route_priv;
-	struct mlx5_devcom *devcom = NULL;
 	struct mlx5_core_dev *route_mdev;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
-	int err;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
 
 	if (mlx5_lag_is_active(out_priv->mdev)) {
+		struct mlx5_devcom *devcom;
+		int err;
+
 		/* In lag case we may get devices from different eswitch instances.
 		 * If we failed to get vport num, it means, mostly, that we on the wrong
 		 * eswitch.
@@ -1686,16 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 		if (err != -ENOENT)
 			return err;
 
+		rcu_read_lock();
 		devcom = out_priv->mdev->priv.devcom;
-		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		if (!esw)
-			return -ENODEV;
+		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+		rcu_read_unlock();
+
+		return err;
 	}
 
-	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
-	if (devcom)
-		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	return err;
+	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 }
 
 static int
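The mlx5e_tc_query_route_vport() change above replaces the blocking get/release pair with an RCU read-side critical section: the peer eswitch returned by mlx5_devcom_get_peer_data_rcu() is only guaranteed valid between rcu_read_lock() and rcu_read_unlock(), so the vhca-to-vport lookup must complete inside that window. A minimal sketch of the reader pattern (illustrative names, not the mlx5 API):

	#include <linux/errno.h>
	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct peer { u16 vport; };
	static struct peer __rcu *peer_ptr;	/* published by the writer side */

	static int query_peer_vport(u16 *vport)
	{
		struct peer *p;
		int err = -ENODEV;

		rcu_read_lock();		/* pin the current grace period */
		p = rcu_dereference(peer_ptr);	/* NULL if the peer is gone */
		if (p) {
			*vport = p->vport;	/* must finish before unlock */
			err = 0;
		}
		rcu_read_unlock();		/* p must not be used past here */

		return err;
	}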
@@ -5301,6 +5302,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
 		goto err_action_counter;
 	}
 
+	mlx5_esw_offloads_devcom_init(esw);
+
 	return 0;
 
 err_action_counter:
@@ -5329,7 +5332,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
 	priv = netdev_priv(rpriv->netdev);
 	esw = priv->mdev->priv.eswitch;
-	mlx5e_tc_clean_fdb_peer_flows(esw);
+	mlx5_esw_offloads_devcom_cleanup(esw);
 
 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
@@ -5643,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
 				 0, NULL);
 }
 
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tc_table *tc;
+	struct mlx5_eswitch *esw;
+	struct mapping_ctx *ctx;
+
+	if (is_mdev_switchdev_mode(priv->mdev)) {
+		esw = priv->mdev->priv.eswitch;
+		ctx = esw->offloads.reg_c0_obj_pool;
+	} else {
+		tc = mlx5e_fs_get_tc(priv->fs);
+		ctx = tc->mapping;
+	}
+
+	return ctx;
+}
+
 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
 				     u64 act_miss_cookie, u32 *act_miss_mapping)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_mapped_obj mapped_obj = {};
+	struct mlx5_eswitch *esw;
 	struct mapping_ctx *ctx;
 	int err;
 
-	ctx = esw->offloads.reg_c0_obj_pool;
+	ctx = mlx5e_get_priv_obj_mapping(priv);
 	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
 	mapped_obj.act_miss_cookie = act_miss_cookie;
 	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
 	if (err)
 		return err;
 
+	if (!is_mdev_switchdev_mode(priv->mdev))
+		return 0;
+
+	esw = priv->mdev->priv.eswitch;
 	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
 	if (IS_ERR(attr->act_id_restore_rule))
 		goto err_rule;
@@ -5673,10 +5697,9 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
 				      u32 act_miss_mapping)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mapping_ctx *ctx;
+	struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
 
-	ctx = esw->offloads.reg_c0_obj_pool;
-	mlx5_del_flow_rules(attr->act_id_restore_rule);
+	if (is_mdev_switchdev_mode(priv->mdev))
+		mlx5_del_flow_rules(attr->act_id_restore_rule);
 	mapping_remove(ctx, act_miss_mapping);
 }
@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
 	}
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+	    mlx5e_ptpsq_fifo_has_room(sq) &&
+	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+		netif_tx_wake_queue(sq->txq);
+		sq->stats->wake++;
+	}
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
-	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
-	    mlx5e_ptpsq_fifo_has_room(sq) &&
-	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
-		netif_tx_wake_queue(sq->txq);
-		stats->wake++;
-	}
+	mlx5e_txqsq_wake(sq);
 
 	return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
...
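Together with the mlx5e_ptp_poll_ts_cq() and txrx.h hunks earlier in this series, the change above extracts the queue wake-up logic from mlx5e_poll_tx_cq() into mlx5e_txqsq_wake() so the PTP timestamp CQ poll can run the same check. The invariant is the usual netdev stop/wake pairing: every completion path that frees send-queue space must re-test for room and wake a stopped queue, otherwise the queue can stall indefinitely. Note the helper increments sq->stats->wake rather than using the stats pointer that mlx5e_poll_tx_cq() caches locally, which keeps it self-contained.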
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 
 	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
-	mlx5_irq_table_destroy(dev);
+	mlx5_irq_table_free_irqs(dev);
 	mutex_unlock(&table->lock);
 }
...
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
 		u32             large_group_num;
 	} params;
 	struct blocking_notifier_head n_head;
+	bool paired[MLX5_MAX_PORTS];
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
...
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
 
+		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+			break;
+
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
 		if (err)
 			goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		if (err)
 			goto err_pair;
 
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
-		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
 			break;
 
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
 		mlx5_esw_offloads_unpair(peer_esw);
 		mlx5_esw_offloads_unpair(esw);
 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2779,7 +2786,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
 	return err;
 }
 
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 				       ESW_OFFLOADS_DEVCOM_PAIR, esw);
 }
 
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	if (err)
 		goto err_vports;
 
-	esw_offloads_devcom_init(esw);
-
 	return 0;
 
 err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
-	esw_offloads_devcom_cleanup(esw);
 	mlx5_eswitch_disable_pf_vf_vports(esw);
 	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 	esw_set_passing_vport_metadata(esw, false);
...
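The paired[] flags added to struct mlx5_eswitch make the devcom PAIR/UNPAIR events idempotent per peer, replacing the single global mlx5_devcom_is_paired() check: a repeated PAIR for an already-paired peer breaks out early instead of pairing twice, and UNPAIR only tears down state this eswitch actually set up. A condensed sketch of the guard (simplified names, not the driver code):

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (esw->paired[peer_idx])
			break;			/* already paired, nothing to do */
		pair(esw, peer_esw);
		esw->paired[peer_idx] = true;
		peer_esw->paired[my_idx] = true;
		break;
	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!esw->paired[peer_idx])
			break;			/* never paired, nothing to undo */
		esw->paired[peer_idx] = false;
		peer_esw->paired[my_idx] = false;
		unpair(esw, peer_esw);
		break;
	}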
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/vport.h>
 #include "lib/devcom.h"
+#include "mlx5_core.h"
 
 static LIST_HEAD(devcom_list);
@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
 struct mlx5_devcom_component {
 	struct {
-		void *data;
+		void __rcu *data;
 	} device[MLX5_DEVCOM_PORTS_SUPPORTED];
 
 	mlx5_devcom_event_handler_t handler;
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
 		return NULL;
 
+	mlx5_dev_list_lock();
 	sguid0 = mlx5_query_nic_system_image_guid(dev);
 	list_for_each_entry(iter, &devcom_list, list) {
 		struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	if (!priv) {
 		priv = mlx5_devcom_list_alloc();
-		if (!priv)
-			return ERR_PTR(-ENOMEM);
+		if (!priv) {
+			devcom = ERR_PTR(-ENOMEM);
+			goto out;
+		}
 
 		idx = 0;
 		new_priv = true;
@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	priv->devs[idx] = dev;
 	devcom = mlx5_devcom_alloc(priv, idx);
 	if (!devcom) {
-		kfree(priv);
-		return ERR_PTR(-ENOMEM);
+		if (new_priv)
+			kfree(priv);
+		devcom = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (new_priv)
 		list_add(&priv->list, &devcom_list);
+out:
+	mlx5_dev_list_unlock();
 	return devcom;
 }
@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
 	if (IS_ERR_OR_NULL(devcom))
 		return;
 
+	mlx5_dev_list_lock();
 	priv = devcom->priv;
 	priv->devs[devcom->idx] = NULL;
@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
 			break;
 
 	if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
-		return;
+		goto out;
 
 	list_del(&priv->list);
 	kfree(priv);
+out:
+	mlx5_dev_list_unlock();
 }
 
 void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
 	comp = &devcom->priv->components[id];
 	down_write(&comp->sem);
 	comp->handler = handler;
-	comp->device[devcom->idx].data = data;
+	rcu_assign_pointer(comp->device[devcom->idx].data, data);
 	up_write(&comp->sem);
 }
@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
 	comp = &devcom->priv->components[id];
 	down_write(&comp->sem);
-	comp->device[devcom->idx].data = NULL;
+	RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
 	up_write(&comp->sem);
+	synchronize_rcu();
 }
 
 int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
 	comp = &devcom->priv->components[id];
 	down_write(&comp->sem);
-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (i != devcom->idx && comp->device[i].data) {
-			err = comp->handler(event, comp->device[i].data,
-					    event_data);
+	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
+		void *data = rcu_dereference_protected(comp->device[i].data,
+						       lockdep_is_held(&comp->sem));
+
+		if (i != devcom->idx && data) {
+			err = comp->handler(event, data, event_data);
 			break;
 		}
+	}
 
 	up_write(&comp->sem);
 	return err;
@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
 	comp = &devcom->priv->components[id];
 	WARN_ON(!rwsem_is_locked(&comp->sem));
 
-	comp->paired = paired;
+	WRITE_ONCE(comp->paired, paired);
 }
 
 bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
 	if (IS_ERR_OR_NULL(devcom))
 		return false;
 
-	return devcom->priv->components[id].paired;
+	return READ_ONCE(devcom->priv->components[id].paired);
 }
 
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
 	comp = &devcom->priv->components[id];
 	down_read(&comp->sem);
-	if (!comp->paired) {
+	if (!READ_ONCE(comp->paired)) {
 		up_read(&comp->sem);
 		return NULL;
 	}
@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
 		if (i != devcom->idx)
 			break;
 
-	return comp->device[i].data;
+	return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+}
+
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+{
+	struct mlx5_devcom_component *comp;
+	int i;
+
+	if (IS_ERR_OR_NULL(devcom))
+		return NULL;
+
+	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+		if (i != devcom->idx)
+			break;
+
+	comp = &devcom->priv->components[id];
+	/* This can change concurrently, however 'data' pointer will remain
+	 * valid for the duration of RCU read section.
+	 */
+	if (!READ_ONCE(comp->paired))
+		return NULL;
+
+	return rcu_dereference(comp->device[i].data);
 }
 
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
...
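The devcom conversion above follows the standard RCU publish/retire discipline: the data pointers become __rcu, writers publish with rcu_assign_pointer() under the component semaphore, and unregistering stores NULL and then calls synchronize_rcu(), so that once mlx5_devcom_unregister_component() returns, no reader that went through mlx5_devcom_get_peer_data_rcu() can still hold the old pointer. The paired flag is likewise switched to WRITE_ONCE()/READ_ONCE() because lockless readers now inspect it. A minimal sketch of the writer side (illustrative names, not the mlx5 API):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct peer { int id; };
	static struct peer __rcu *peer_ptr;

	static void publish_peer(struct peer *p)
	{
		rcu_assign_pointer(peer_ptr, p);	/* pairs with rcu_dereference() */
	}

	static void retire_peer(void)
	{
		struct peer *old;

		old = rcu_replace_pointer(peer_ptr, NULL, true);
		synchronize_rcu();	/* wait until no reader can still see 'old' */
		kfree(old);		/* now safe to free */
	}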
@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
 				enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
 				   enum mlx5_devcom_components id);
...
@@ -1049,7 +1049,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 	dev->dm = mlx5_dm_create(dev);
 	if (IS_ERR(dev->dm))
-		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+		mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
 
 	dev->tracer = mlx5_fw_tracer_create(dev);
 	dev->hv_vhca = mlx5_hv_vhca_create(dev);
...
@@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
 void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
 int mlx5_irq_table_create(struct mlx5_core_dev *dev);
 void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
 int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
 struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
...
@@ -32,6 +32,7 @@ struct mlx5_irq {
 	struct mlx5_irq_pool *pool;
 	int refcount;
 	struct msi_map map;
+	u32 pool_index;
 };
 
 struct mlx5_irq_table {
@@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq)
 	struct cpu_rmap *rmap;
 #endif
 
-	xa_erase(&pool->irqs, irq->map.index);
+	xa_erase(&pool->irqs, irq->pool_index);
 	/* free_irq requires that affinity_hint and rmap will be cleared before
 	 * calling it. To satisfy this requirement, we call
 	 * irq_cpu_rmap_remove() to remove the notifier
@@ -276,11 +277,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
-	irq->map.index = i;
-	err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+	irq->pool_index = i;
+	err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
 	if (err) {
 		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-			      irq->map.index, err);
+			      irq->pool_index, err);
 		goto err_xa;
 	}
 	return irq;
@@ -567,7 +568,7 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 	struct mlx5_irq *irq;
 	int i;
 
-	af_desc.is_managed = 1;
+	af_desc.is_managed = false;
 	for (i = 0; i < nirqs; i++) {
 		cpumask_set_cpu(cpus[i], &af_desc.mask);
 		irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
@@ -691,6 +692,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
 	irq_pool_free(table->pcif_pool);
 }
 
+static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+	struct mlx5_irq *irq;
+	unsigned long index;
+
+	xa_for_each(&pool->irqs, index, irq)
+		free_irq(irq->map.virq, &irq->nh);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+	if (table->sf_ctrl_pool) {
+		mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+		mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+	}
+	mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
 /* irq_table API */
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +793,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 	pci_free_irq_vectors(dev->pdev);
 }
 
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_table *table = dev->priv.irq_table;
+
+	if (mlx5_core_is_sf(dev))
+		return;
+
+	mlx5_irq_pools_free_irqs(table);
+	pci_free_irq_vectors(dev->pdev);
+}
+
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
 	if (table->sf_comp_pool)
...
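Two related fixes are visible in the hunks above. First, each mlx5_irq now carries a dedicated pool_index used as the xarray key, so xa_store() in mlx5_irq_alloc() and xa_erase() in irq_release() stay consistent even if irq->map.index diverges from the pool slot. Second, the new mlx5_irq_table_free_irqs() gives mlx5_core_eq_free_irqs() a way to release the kernel interrupts themselves, free_irq() on every pool entry via xa_for_each() followed by pci_free_irq_vectors(), without tearing down the whole IRQ table the way mlx5_irq_table_destroy() does.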
@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
 	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
+	caps->roce_caps.fl_rc_qp_when_roce_disabled =
+		MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
 
 	if (MLX5_CAP_GEN(mdev, roce)) {
 		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 			return err;
 
 		caps->roce_caps.roce_en = roce_en;
-		caps->roce_caps.fl_rc_qp_when_roce_disabled =
+		caps->roce_caps.fl_rc_qp_when_roce_disabled |=
 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
 		caps->roce_caps.fl_rc_qp_when_roce_enabled =
 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
...
@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
 {
 	u32 crc = crc32(0, input_data, length);
 
-	return (__force u32)htonl(crc);
+	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+			    ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
 }
 
 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
...
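The replacement expression in dr_ste_crc32_calc() is an unconditional 32-bit byte swap (equivalent to the kernel's swab32()), whereas htonl() swaps only on little-endian hosts and is a no-op on big-endian ones, which left big-endian machines without the byte order the STE hash expects. A stand-alone check of the swap (ordinary user-space C, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t swap32(uint32_t crc)
	{
		return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
		       ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
	}

	int main(void)
	{
		/* byte order reverses regardless of host endianness */
		printf("%08x -> %08x\n", 0x12345678u, swap32(0x12345678u));
		return 0;	/* prints: 12345678 -> 78563412 */
	}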
@@ -1705,7 +1705,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         rc[0x1];
 
 	u8         uar_4k[0x1];
-	u8         reserved_at_241[0x9];
+	u8         reserved_at_241[0x7];
+	u8         fl_rc_qp_when_roce_disabled[0x1];
+	u8         regexp_params[0x1];
 	u8         uar_sz[0x6];
 	u8         port_selection_cap[0x1];
 	u8         reserved_at_248[0x1];
...
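Bit accounting for the mlx5_ifc_cmd_hca_cap_bits change: the 9-bit reserved run at offset 0x241 is split as 0x7 + 0x1 + 0x1 = 0x9, carving out the fl_rc_qp_when_roce_disabled and regexp_params capability bits while leaving every subsequent field at its original offset. Keeping the widths balanced this way is what lets the structure continue to match the firmware layout, and it is this general-capability bit that the mlx5dr_cmd_query_device() hunk reads with MLX5_CAP_GEN() and then ORs with the per-RoCE capability.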