Commit 045e42f3 authored by David S. Miller

Merge tag 'mlx5-updates-2020-09-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-09-15

Various updates to the mlx5 driver:

1) Eli adds support for the TC trap action.
2) Eran makes minor improvements to the clock.c code structure.
3) Jianbo improves error reporting in LAG.
4) Maor adds IPv6 traffic class (DSCP) header rewrite support.
5) Ofer Levi adds support for CQE compression of multi-stride packets.
6) Vu enables use of vport metadata by default.
7) Some minor code cleanups.
====================
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 897dccb8 b7cf0806
@@ -265,6 +265,7 @@ enum {
 	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
 	MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
 };

 struct mlx5e_cq {
...
@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
 {
 	u32 data_size;
-	int err = 0;
 	u32 offset;
+	int err;

 	for (offset = 0; offset < value_len; offset += data_size) {
 		data_size = value_len - offset;
...
@@ -243,7 +243,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 		return MLX5E_NUM_PFLAGS;
 	case ETH_SS_TEST:
 		return mlx5e_self_test_num(priv);
-		fallthrough;
 	default:
 		return -EOPNOTSUPP;
 	}
...
@@ -848,6 +848,13 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
 		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

+	/* For CQE compression on striding RQ, use stride index provided by
+	 * HW if capability is supported.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+	    MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+		__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+
 	return 0;

 err_destroy_rq:
@@ -2182,6 +2189,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 			     struct mlx5e_cq_param *param)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool hw_stridx = false;
 	void *cqc = param->cqc;
 	u8 log_cq_size;
@@ -2189,6 +2197,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
 			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
 		break;
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		log_cq_size = params->log_rq_mtu_frames;
@@ -2196,7 +2205,8 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
-		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
 	}
...
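Editorial note: both knobs involved here correspond to existing mlx5 ethtool private flags (striding RQ via rx_striding_rq, CQE compression via rx_cqe_compress; the flag names are inferred from the PFLAG macros above), e.g. `ethtool --set-priv-flags eth0 rx_cqe_compress on` with an illustrative interface name. When both are enabled and the device reports mini_cqe_resp_stride_index, the RQ state bit set above switches the CQ to the stride-index mini-CQE format.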
@@ -137,8 +137,17 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 	title->check_sum = mini_cqe->checksum;
 	title->op_own &= 0xf0;
 	title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
-	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);

+	/* state bit set implies linked-list striding RQ wq type and
+	 * HW stride index capability supported
+	 */
+	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
+		title->wqe_counter = mini_cqe->stridx;
+		return;
+	}
+
+	/* HW stride index capability not supported */
+	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
 	else
...
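For background, CQE compression delivers one full "title" CQE followed by an array of 8-byte mini CQEs that carry only the per-packet fields, and the driver expands each mini CQE into a copy of the title. A minimal sketch of the expansion step this hunk changes, assuming the mlx5_mini_cqe8 layout shown near the end of this series (expand_one_mini_cqe() is a hypothetical helper, not driver API):

/* Sketch only: how the HW stride index supersedes the software
 * stride accounting during mini-CQE expansion. */
static void expand_one_mini_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *title,
				struct mlx5_mini_cqe8 *mini_cqe)
{
	title->byte_cnt  = mini_cqe->byte_cnt;
	title->check_sum = mini_cqe->checksum;

	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
		/* HW reports the stride index directly, already big endian. */
		title->wqe_counter = mini_cqe->stridx;
		return;
	}

	/* Otherwise use the software counter accumulated across CQEs. */
	title->wqe_counter = cpu_to_be16(rq->cqd.wqe_counter);
}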
@@ -2615,6 +2615,7 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
 	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
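Editorial note on the 0xc00f mask (an explanation, not part of the patch; assumes a little-endian host): the 16-bit window at offset 0 of the IPv6 header covers version, traffic class, and the top of the flow label. DSCP is tc[7:2]; tc[7:4] sits in the low nibble of byte 0 (byte mask 0x0f) and tc[3:2] in the top two bits of byte 1 (byte mask 0xc0), and those two bytes read back as the host u16 0xc00f:

	/* bytes {0x0f, 0xc0} in memory == 0xc00f as a little-endian u16 */
	uint16_t ip6_dscp_mask = 0x0f | (0xc0 << 8);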
@@ -3943,6 +3944,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			break;
+		case FLOW_ACTION_TRAP:
+			if (!flow_offload_has_one_action(flow_action)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "action trap is supported as a sole action only");
+				return -EOPNOTSUPP;
+			}
+			action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+				   MLX5_FLOW_CONTEXT_ACTION_COUNT);
+			attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+			break;
 		case FLOW_ACTION_MPLS_PUSH:
 			if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
 							reformat_l2_to_l3_tunnel) ||
...
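As a usage illustration (the command line and device name are assumptions, not part of this patch), a rule this code would offload is `tc filter add dev enp8s0f0_0 ingress protocol ip flower skip_sw action trap`. Because trap is accepted only as the sole action, the flow attribute is marked MLX5_ESW_ATTR_FLAG_SLOW_PATH, steering matching packets through the slow-path tables rather than to a regular forwarding destination.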
@@ -1864,18 +1864,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
 	return true;
 }

-static bool
-esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
-{
-	return mlx5_core_mp_enabled(esw->dev);
-}
-
-static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
-{
-	return esw_check_vport_match_metadata_mandatory(esw) &&
-	       esw_check_vport_match_metadata_supported(esw);
-}
-
 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
 {
 	u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
@@ -1908,9 +1896,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
 					     struct mlx5_vport *vport)
 {
-	if (vport->vport == MLX5_VPORT_UPLINK)
-		return 0;
-
 	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
 	vport->metadata = vport->default_metadata;
 	return vport->metadata ? 0 : -ENOSPC;
@@ -1919,26 +1904,56 @@ static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
 						struct mlx5_vport *vport)
 {
-	if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+	if (!vport->default_metadata)
 		return;

 	WARN_ON(vport->metadata != vport->default_metadata);
 	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
 }

+static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int i;
+
+	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+		return;
+
+	mlx5_esw_for_all_vports_reverse(esw, i, vport)
+		esw_offloads_vport_metadata_cleanup(esw, vport);
+}
+
+static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int err;
+	int i;
+
+	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+		return 0;
+
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		err = esw_offloads_vport_metadata_setup(esw, vport);
+		if (err)
+			goto metadata_err;
+	}
+
+	return 0;
+
+metadata_err:
+	esw_offloads_metadata_uninit(esw);
+	return err;
+}
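Editorial aside: esw_offloads_metadata_init() above follows the standard kernel init/unwind shape, and the teardown can safely walk every vport because the cleanup helper checks default_metadata before freeing. A generic, self-contained sketch of the pattern (struct item, setup_one() and teardown_one() are hypothetical):

struct item { void *res; };

static int setup_one(struct item *it);
static void teardown_one(struct item *it);

static int setup_all(struct item *items, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = setup_one(&items[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* tear down only what succeeded, in reverse */
		teardown_one(&items[i]);
	return err;
}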

 int
 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 				     struct mlx5_vport *vport)
 {
 	int err;

-	err = esw_offloads_vport_metadata_setup(esw, vport);
-	if (err)
-		goto metadata_err;
-
 	err = esw_acl_ingress_ofld_setup(esw, vport);
 	if (err)
-		goto ingress_err;
+		return err;

 	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
 		err = esw_acl_egress_ofld_setup(esw, vport);
@@ -1950,9 +1965,6 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 egress_err:
 	esw_acl_ingress_ofld_cleanup(esw, vport);
-ingress_err:
-	esw_offloads_vport_metadata_cleanup(esw, vport);
-metadata_err:
 	return err;
 }
@@ -1962,22 +1974,14 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 {
 	esw_acl_egress_ofld_cleanup(vport);
 	esw_acl_ingress_ofld_cleanup(esw, vport);
-	esw_offloads_vport_metadata_cleanup(esw, vport);
 }

 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int err;
-
-	if (esw_use_vport_metadata(esw))
-		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-	err = esw_vport_create_offloads_acl_tables(esw, vport);
-	if (err)
-		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
-	return err;
+	return esw_vport_create_offloads_acl_tables(esw, vport);
 }

 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -1986,7 +1990,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
 	esw_vport_destroy_offloads_acl_tables(esw, vport);
-	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
 }

 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
@@ -2144,7 +2147,14 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	err = mlx5_esw_host_number_init(esw);
 	if (err)
-		goto err_vport_metadata;
+		goto err_metadata;

+	if (esw_check_vport_match_metadata_supported(esw))
+		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+	err = esw_offloads_metadata_init(esw);
+	if (err)
+		goto err_metadata;
+
 	err = esw_set_passing_vport_metadata(esw, true);
 	if (err)
@@ -2178,6 +2188,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 err_steering_init:
 	esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
+	esw_offloads_metadata_uninit(esw);
+err_metadata:
+	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
 	mlx5_rdma_disable_roce(esw->dev);
 	mutex_destroy(&esw->offloads.termtbl_mutex);
 	return err;
@@ -2211,6 +2224,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
 	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 	esw_set_passing_vport_metadata(esw, false);
 	esw_offloads_steering_cleanup(esw);
+	esw_offloads_metadata_uninit(esw);
+	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
 	mlx5_rdma_disable_roce(esw->dev);
 	mutex_destroy(&esw->offloads.termtbl_mutex);
 	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
...
@@ -102,7 +102,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
 		if (ldev->pf[i].netdev == ndev)
 			return i;

-	return -1;
+	return -ENOENT;
 }

 static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	bool do_bond, roce_lag;
 	int err;

-	if (!dev0 || !dev1)
+	if (!mlx5_lag_is_ready(ldev))
 		return;

 	spin_lock(&lag_lock);
@@ -355,7 +355,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 {
 	struct net_device *upper = info->upper_dev, *ndev_tmp;
 	struct netdev_lag_upper_info *lag_upper_info = NULL;
-	bool is_bonded;
+	bool is_bonded, is_in_lag, mode_supported;
 	int bond_status = 0;
 	int num_slaves = 0;
 	int idx;
@@ -374,7 +374,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
 		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
-		if (idx > -1)
+		if (idx >= 0)
 			bond_status |= (1 << idx);

 		num_slaves++;
@@ -391,13 +391,24 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	/* Determine bonding status:
 	 * A device is considered bonded if both its physical ports are slaves
 	 * of the same lag master, and only them.
-	 * Lag mode must be activebackup or hash.
 	 */
-	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
-		    (bond_status == 0x3) &&
-		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
-		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;

+	if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+		NL_SET_ERR_MSG_MOD(info->info.extack,
+				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
+		return 0;
+	}
+
+	/* Lag mode must be activebackup or hash. */
+	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
+
+	if (is_in_lag && !mode_supported)
+		NL_SET_ERR_MSG_MOD(info->info.extack,
+				   "Can't activate LAG offload, TX type isn't supported");
+
+	is_bonded = is_in_lag && mode_supported;
 	if (tracker->is_bonded != is_bonded) {
 		tracker->is_bonded = is_bonded;
 		return 1;
@@ -418,7 +429,7 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
 		return 0;

 	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
-	if (idx == -1)
+	if (idx < 0)
 		return 0;

 	/* This information is used to determine virtual to physical
@@ -445,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;

 	ldev = container_of(this, struct mlx5_lag, nb);
+
+	if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+		return NOTIFY_DONE;
+
 	tracker = ldev->tracker;

 	switch (event) {
@@ -493,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
 	kfree(ldev);
 }

-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
-				struct mlx5_core_dev *dev,
-				struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+			       struct mlx5_core_dev *dev,
+			       struct net_device *netdev)
 {
 	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

 	if (fn >= MLX5_MAX_PORTS)
-		return;
+		return -EPERM;

 	spin_lock(&lag_lock);
 	ldev->pf[fn].dev = dev;
@@ -511,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;

 	spin_unlock(&lag_lock);
+
+	return fn;
 }

 static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -537,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 {
 	struct mlx5_lag *ldev = NULL;
 	struct mlx5_core_dev *tmp_dev;
-	int err;
+	int i, err;

-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return;

 	tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -556,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 		}
 	}

-	mlx5_lag_dev_add_pf(ldev, dev, netdev);
+	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+		return;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		tmp_dev = ldev->pf[i].dev;
+		if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+		    MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+			break;
+	}
+
+	if (i >= MLX5_MAX_PORTS)
+		ldev->flags |= MLX5_LAG_FLAG_READY;

 	if (!ldev->nb.notifier_call) {
 		ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -587,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
 	mlx5_lag_dev_remove_pf(ldev, dev);

+	ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
 		if (ldev->pf[i].dev)
 			break;
...
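For readers skimming the bitmask logic above: with MLX5_MAX_PORTS == 2, each enslaved netdev belonging to this device pair sets bit 0 or bit 1, so exactly two slaves with bond_status == 0x3 means both of our ports, and only them, sit under one bond master. A trivial standalone restatement (names are illustrative):

#include <stdbool.h>

#define MAX_PORTS 2	/* stands in for MLX5_MAX_PORTS */

/* bond_status bit i is set when our port i is enslaved to the bond. */
static bool both_ports_in_lag(int num_slaves, unsigned int bond_status)
{
	return num_slaves == MAX_PORTS && bond_status == 0x3;
}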
@@ -16,6 +16,7 @@ enum {
 	MLX5_LAG_FLAG_ROCE = 1 << 0,
 	MLX5_LAG_FLAG_SRIOV = 1 << 1,
 	MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+	MLX5_LAG_FLAG_READY = 1 << 3,
 };

 #define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
 	return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
 }

+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+	return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
 void mlx5_modify_lag(struct mlx5_lag *ldev,
 		     struct lag_tracker *tracker);
 int mlx5_activate_lag(struct mlx5_lag *ldev,
...
@@ -11,7 +11,7 @@
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
-	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+	if (!mlx5_lag_is_ready(ldev))
 		return false;

 	return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
@@ -131,7 +131,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
 			struct net_device *nh_dev = nh->fib_nh_dev;
 			int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);

-			mlx5_lag_set_port_affinity(ldev, ++i);
+			if (i < 0)
+				i = MLX5_LAG_NORMAL_AFFINITY;
+			else
+				++i;
+
+			mlx5_lag_set_port_affinity(ldev, i);
 		}
 		return;
 	}
...
@@ -150,28 +150,30 @@ static void mlx5_pps_out(struct work_struct *work)
 static void mlx5_timestamp_overflow(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
-	struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
-						overflow_work);
+	struct mlx5_core_dev *mdev;
+	struct mlx5_clock *clock;
 	unsigned long flags;

+	clock = container_of(dwork, struct mlx5_clock, overflow_work);
+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_read(&clock->tc);
-	mlx5_update_clock_info_page(clock->mdev);
+	mlx5_update_clock_info_page(mdev);
 	write_sequnlock_irqrestore(&clock->lock, flags);
 	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
 }

-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
-			    const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
 {
-	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-						ptp_info);
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
 	u64 ns = timespec64_to_ns(ts);
+	struct mlx5_core_dev *mdev;
 	unsigned long flags;

+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_init(&clock->tc, &clock->cycles, ns);
-	mlx5_update_clock_info_page(clock->mdev);
+	mlx5_update_clock_info_page(mdev);
 	write_sequnlock_irqrestore(&clock->lock, flags);

 	return 0;
@@ -180,13 +182,12 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
 			     struct ptp_system_timestamp *sts)
 {
-	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-						ptp_info);
-	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
-						  clock);
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
 	unsigned long flags;
 	u64 cycles, ns;

+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	write_seqlock_irqsave(&clock->lock, flags);
 	cycles = mlx5_read_internal_timer(mdev, sts);
 	ns = timecounter_cyc2time(&clock->tc, cycles);
@@ -199,13 +200,14 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
-	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-						ptp_info);
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
 	unsigned long flags;

+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_adjtime(&clock->tc, delta);
-	mlx5_update_clock_info_page(clock->mdev);
+	mlx5_update_clock_info_page(mdev);
 	write_sequnlock_irqrestore(&clock->lock, flags);

 	return 0;
@@ -213,12 +215,13 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
-	u64 adj;
-	u32 diff;
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
 	unsigned long flags;
 	int neg_adj = 0;
-	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
-						ptp_info);
+	u32 diff;
+	u64 adj;

 	if (delta < 0) {
 		neg_adj = 1;
@@ -229,11 +232,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	adj *= delta;
 	diff = div_u64(adj, 1000000000ULL);
+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_read(&clock->tc);
 	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
 				       clock->nominal_c_mult + diff;
-	mlx5_update_clock_info_page(clock->mdev);
+	mlx5_update_clock_info_page(mdev);
 	write_sequnlock_irqrestore(&clock->lock, flags);

 	return 0;
@@ -437,7 +441,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
 	.owner = THIS_MODULE,
-	.name = "mlx5_p2p",
+	.name = "mlx5_ptp",
 	.max_adj = 100000000,
 	.n_alarm = 0,
 	.n_ext_ts = 0,
@@ -465,7 +469,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
 static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
 {
-	struct mlx5_core_dev *mdev = clock->mdev;
+	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
 	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
 	u8 mode;
 	int err;
@@ -538,20 +543,23 @@ static int mlx5_pps_event(struct notifier_block *nb,
 			  unsigned long type, void *data)
 {
 	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
-	struct mlx5_core_dev *mdev = clock->mdev;
 	struct ptp_clock_event ptp_event;
 	u64 cycles_now, cycles_delta;
 	u64 nsec_now, nsec_delta, ns;
 	struct mlx5_eqe *eqe = data;
 	int pin = eqe->data.pps.pin;
+	struct mlx5_core_dev *mdev;
 	struct timespec64 ts;
 	unsigned long flags;

+	mdev = container_of(clock, struct mlx5_core_dev, clock);
 	switch (clock->ptp_info.pin_config[pin].func) {
 	case PTP_PF_EXTTS:
 		ptp_event.index = pin;
-		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
-					be64_to_cpu(eqe->data.pps.time_stamp));
+		ptp_event.timestamp =
+			mlx5_timecounter_cyc2time(clock,
+						  be64_to_cpu(eqe->data.pps.time_stamp));
 		if (clock->pps_info.enabled) {
 			ptp_event.type = PTP_CLOCK_PPSUSR;
 			ptp_event.pps_times.ts_real =
@@ -574,8 +582,8 @@ static int mlx5_pps_event(struct notifier_block *nb,
 		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
 					 clock->cycles.mult);
 		clock->pps_info.start[pin] = cycles_now + cycles_delta;
-		schedule_work(&clock->pps_info.out_work);
 		write_sequnlock_irqrestore(&clock->lock, flags);
+		schedule_work(&clock->pps_info.out_work);
 		break;
 	default:
 		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -605,7 +613,6 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 			clock->cycles.shift);
 	clock->nominal_c_mult = clock->cycles.mult;
 	clock->cycles.mask = CLOCKSOURCE_MASK(41);
-	clock->mdev = mdev;

 	timecounter_init(&clock->tc, &clock->cycles,
 			 ktime_to_ns(ktime_get_real()));
...
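The recurring clock.c change drops the mlx5_clock->mdev back-pointer and instead recovers the parent device with container_of(), which is valid only because struct mlx5_clock is embedded by value in struct mlx5_core_dev (see the driver.h hunk below). A standalone sketch of the idiom with hypothetical types:

#include <stddef.h>

struct clk { int id; };
struct parent_dev { int flags; struct clk clock; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct parent_dev *clk_to_parent(struct clk *c)
{
	/* Subtract the member offset to get back to the enclosing struct. */
	return container_of(c, struct parent_dev, clock);
}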
@@ -816,7 +816,7 @@ struct mlx5_mini_cqe8 {
 		__be32 rx_hash_result;
 		struct {
 			__be16 checksum;
-			__be16 rsvd;
+			__be16 stridx;
 		};
 		struct {
 			__be16 wqe_counter;
@@ -836,6 +836,7 @@ enum {

 enum {
 	MLX5_CQE_FORMAT_CSUM = 0x1,
+	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
 };

 #define MLX5_MINI_CQE_ARRAY_SIZE 8
...
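Note that stridx only renames the previously reserved 16 bits next to checksum in the mini-CQE union; the layout and size are unchanged, and devices without the mini_cqe_resp_stride_index capability keep using the plain MLX5_CQE_FORMAT_CSUM format selected in mlx5e_build_rx_cq_param() above.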
@@ -643,7 +643,6 @@ struct mlx5_pps {
 };

 struct mlx5_clock {
-	struct mlx5_core_dev *mdev;
 	struct mlx5_nb pps_nb;
 	seqlock_t lock;
 	struct cyclecounter cycles;
...