Commit 9311ccef authored by David S. Miller

Merge tag 'mlx5-fixes-2021-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2021-11-16

Please pull this mlx5 fixes series, or let me know in case of any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3751c3d3 c4c31767
@@ -339,6 +339,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
case MLX5_CMD_OP_DEALLOC_SF:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_DESTROY_UMEM:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
case MLX5_CMD_OP_CREATE_UCTX:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_CREATE_UMEM:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
......
@@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec_in(dev, destroy_cq, in);
if (err)
return err;
synchronize_irq(cq->irqn);
mlx5_cq_put(cq);
wait_for_completion(&cq->free);
return err;
return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
......
@@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return;
if (cq->dbg)
if (cq->dbg) {
rem_res_tree(cq->dbg);
cq->dbg = NULL;
}
}
@@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
@@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
if (!clear_action)
goto out;
err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
return err;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
out:
return 0;
}
@@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
memcpy(pre_ct_attr, attr, attr_sz);
err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
if (err) {
ct_dbg("Failed to set register for ct clear");
goto err_set_registers;
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to add create ct clear mod hdr");
goto err_set_registers;
goto err_mod_hdr;
}
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
if (IS_ERR(rule)) {
@@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
err_insert:
mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
err_set_registers:
err_mod_hdr:
netdev_warn(priv->netdev,
"Failed to offload ct clear flow, err %d\n", err);
kfree(pre_ct_attr);
......
@@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
@@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
......
@@ -102,6 +102,7 @@ struct mlx5e_tc_flow {
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
struct completion del_hw_done;
int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
};
......
@@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
struct list_head *flow_list,
int index)
{
if (IS_ERR(mlx5e_flow_get(flow)))
if (IS_ERR(mlx5e_flow_get(flow))) {
/* Flow is being deleted concurrently. Wait for it to be
* unoffloaded from hardware, otherwise deleting encap will
* fail.
*/
wait_for_completion(&flow->del_hw_done);
return;
}
wait_for_completion(&flow->init_done);
flow->tmp_entry_index = index;
......
@@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx {
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
/* resync */
spinlock_t lock; /* protects resync fields */
struct mlx5e_ktls_rx_resync_ctx resync;
struct list_head list;
};
@@ -386,14 +387,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
struct mlx5e_icosq *sq;
bool trigger_poll;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
sq = &c->async_icosq;
ktls_resync = sq->ktls_resync;
trigger_poll = false;
spin_lock_bh(&ktls_resync->lock);
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_lock_bh(&priv_rx->lock);
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
}
spin_unlock_bh(&priv_rx->lock);
spin_unlock_bh(&ktls_resync->lock);
if (!trigger_poll)
@@ -617,6 +622,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_create_key;
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
priv_rx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -730,10 +737,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
priv_rx = list_first_entry(&local_list,
struct mlx5e_ktls_offload_context_rx,
list);
spin_lock(&priv_rx->lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
if (IS_ERR(cseg)) {
spin_unlock(&priv_rx->lock);
break;
list_del(&priv_rx->list);
}
list_del_init(&priv_rx->list);
spin_unlock(&priv_rx->lock);
db_cseg = cseg;
}
if (db_cseg)
......
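The ktls_rx hunks above add a per-context spinlock, initialize the list head with INIT_LIST_HEAD(), move the rec_seq copy under the new lock, only queue the context when list_empty() says it is not already queued, and make the service loop detach entries with list_del_init(). Together these keep a context from being added to or removed from the resync list twice when a new resync request races with the ICOSQ service loop. A minimal sketch of that guarded list-membership pattern, using hypothetical demo_* names rather than the driver's structures:

/* Sketch of the "per-entry lock + list_empty()/list_del_init()" pattern
 * the hunks above rely on. demo_* names are illustrative only.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_rx_ctx {
	spinlock_t lock;		/* protects 'node' membership */
	struct list_head node;
};

struct demo_resync_queue {
	spinlock_t lock;		/* protects 'head' */
	struct list_head head;
};

static void demo_rx_ctx_init(struct demo_rx_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->node);	/* empty node means "not queued" */
}

/* Producer (resync request): queue only if not already queued. */
static void demo_resync_request(struct demo_resync_queue *q, struct demo_rx_ctx *ctx)
{
	spin_lock_bh(&q->lock);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->node))
		list_add_tail(&ctx->node, &q->head);
	spin_unlock_bh(&ctx->lock);
	spin_unlock_bh(&q->lock);
}

/* Consumer (service loop): detach with list_del_init() so a later
 * list_empty() check on the producer side stays meaningful.
 */
static void demo_resync_complete(struct demo_rx_ctx *ctx)
{
	spin_lock_bh(&ctx->lock);
	list_del_init(&ctx->node);
	spin_unlock_bh(&ctx->lock);
}

Nesting the per-entry lock inside the queue lock mirrors the order used in the hunks (ktls_resync->lock taken before priv_rx->lock), which avoids lock-order inversion between the two paths.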
@@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
complete_all(&flow->del_hw_done);
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
@@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
&parse_attr->mod_hdr_acts,
act, extack);
if (err)
return err;
@@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
return -EOPNOTSUPP;
}
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
&parse_attr->mod_hdr_acts,
act, extack);
if (err)
return err;
@@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
*__flow = flow;
*__parse_attr = parse_attr;
......
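The del_hw_done completion added across the hunks above (declared in the flow struct, initialized in mlx5e_alloc_flow(), waited on in mlx5e_take_tmp_flow() when the refcount race is lost, and signalled in mlx5e_tc_del_fdb_flow() once the FDB rules are gone) is the standard kernel completion handshake. A minimal self-contained sketch of that pattern, with hypothetical demo_* names rather than the driver's structures:

/* Sketch of the completion-based handshake assumed by the hunks above. */
#include <linux/completion.h>
#include <linux/slab.h>

struct demo_flow {
	struct completion del_hw_done;	/* signalled once HW offload is removed */
};

static struct demo_flow *demo_flow_alloc(void)
{
	struct demo_flow *flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	if (!flow)
		return NULL;
	init_completion(&flow->del_hw_done);	/* starts out "not yet done" */
	return flow;
}

/* Deletion path: remove the hardware rules first, then wake every waiter. */
static void demo_flow_del_hw(struct demo_flow *flow)
{
	/* ... unoffload FDB rules here ... */
	complete_all(&flow->del_hw_done);
}

/* Concurrent path (e.g. an encap/neigh update) that must not tear down
 * shared state until the flow is really gone from hardware.
 */
static void demo_flow_wait_unoffloaded(struct demo_flow *flow)
{
	wait_for_completion(&flow->del_hw_done);
}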
@@ -1305,12 +1305,17 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
*/
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
bool toggle_lag;
int ret;
if (!mlx5_esw_allowed(esw))
return 0;
mlx5_lag_disable_change(esw->dev);
toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
@@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
esw->esw_funcs.num_vfs = num_vfs;
}
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
return ret;
}
@@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
@@ -1934,7 +1947,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
return err;
}
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw;
esw = dev->priv.eswitch;
return mlx5_esw_allowed(esw) ? esw->offloads.encap :
return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
......
@@ -3183,12 +3183,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
u64 mapping_id;
int err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
@@ -3286,7 +3280,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -3630,7 +3623,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
*encap = esw->offloads.encap;
unlock:
up_write(&esw->mode_lock);
return 0;
return err;
}
static bool
......
@@ -40,7 +40,7 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_SF_NUM_COUNTERS_BULK 6
#define MLX5_SF_NUM_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
......
@@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
int changed = 0;
int idx;
if (!netif_is_lag_master(upper))
@@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
NL_SET_ERR_MSG_MOD(info->info.extack,
"Can't activate LAG offload, PF is configured with more than 64 VFs");
return 0;
}
/* Lag mode must be activebackup or hash. */
mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
if (is_in_lag && !mode_supported)
NL_SET_ERR_MSG_MOD(info->info.extack,
"Can't activate LAG offload, TX type isn't supported");
is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
return 1;
changed = 1;
}
return 0;
if (!is_in_lag)
return changed;
if (!mlx5_lag_is_ready(ldev))
NL_SET_ERR_MSG_MOD(info->info.extack,
"Can't activate LAG offload, PF is configured with more than 64 VFs");
else if (!mode_supported)
NL_SET_ERR_MSG_MOD(info->info.extack,
"Can't activate LAG offload, TX type isn't supported");
return changed;
}
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
@@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
ldev = container_of(this, struct mlx5_lag, nb);
if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
return NOTIFY_DONE;
tracker = ldev->tracker;
switch (event) {
......
@@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
u16 vport_number,
bool other_vport,
struct mlx5dr_cmd_vport_cap *vport_caps)
{
u16 cmd_vport = vport_number;
bool other_vport = true;
int ret;
if (vport_number == MLX5_VPORT_UPLINK) {
dr_domain_fill_uplink_caps(dmn, vport_caps);
return 0;
}
if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
other_vport = false;
cmd_vport = 0;
}
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
cmd_vport,
vport_number,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
cmd_vport,
vport_number,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
return dr_domain_query_vport(dmn,
dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
false,
&dmn->info.caps.vports.esw_manager_caps);
}
static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
@@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (!vport_caps)
return NULL;
ret = dr_domain_query_vport(dmn, vport, vport_caps);
ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
if (ret) {
kvfree(vport_caps);
return NULL;
@@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
return vport_caps;
}
static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
(!caps->is_ecpf && vport == 0);
}
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
struct mlx5dr_cmd_vport_cap *vport_caps;
if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
(!caps->is_ecpf && vport == 0))
if (dr_domain_is_esw_mgr_vport(dmn, vport))
return &caps->vports.esw_manager_caps;
if (vport == MLX5_VPORT_UPLINK)
return &caps->vports.uplink_caps;
vport_load:
vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
if (vport_caps)
@@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
}
}
static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
struct mlx5dr_cmd_vport_cap *vport_caps;
vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
if (!vport_caps)
return -EINVAL;
return 0;
}
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
struct mlx5dr_domain *dmn)
{
@@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
goto free_vports_caps_xa;
}
ret = dr_domain_query_uplink(dmn);
if (ret) {
mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
goto free_vports_caps_xa;
}
dr_domain_query_uplink(dmn);
return 0;
......
@@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
/* Check that all mask data was consumed */
for (i = 0; i < consumed_mask.match_sz; i++) {
if (consumed_mask.match_buf[i]) {
mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
ret = -EOPNOTSUPP;
goto free_consumed_mask;
}
if (!((u8 *)consumed_mask.match_buf)[i])
continue;
mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
ret = -EOPNOTSUPP;
goto free_consumed_mask;
}
ret = 0;
......
@@ -764,6 +764,7 @@ struct mlx5dr_roce_cap {
struct mlx5dr_vports {
struct mlx5dr_cmd_vport_cap esw_manager_caps;
struct mlx5dr_cmd_vport_cap uplink_caps;
struct xarray vports_caps_xa;
};
......
@@ -145,13 +145,13 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
ESW_TUN_OPTS_OFFSET + 1)
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
return MLX5_ESWITCH_NONE;
}
......