Commit 1e1e73ee authored by David S. Miller

Merge tag 'mlx5-fixes-2021-03-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2021-03-10
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8373a0fe 84076c4c
@@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
 	MLX5_SET(qpc, qpc, uar_page, uar_index);
-	MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

 	/* Set "fast registration enabled" for all kernel QPs */
@@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
 		}
 		return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
 	}
-	return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+	return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+			      MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
 }

 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
@@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
 		}
 		return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
 	}
-	return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+	return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+			      MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
 }

 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
@@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
 			    MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
 			    MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
			    MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
-	int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+	int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+				       MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;

 	if (recv_cq &&
 	    recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
@@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
 	MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
 	MLX5_SET(qpc, qpc, no_sq, 1);
 	MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
@@ -4873,6 +4877,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	struct mlx5_ib_dev *dev;
 	int has_net_offloads;
 	__be64 *rq_pas0;
+	int ts_format;
 	void *in;
 	void *rqc;
 	void *wq;
@@ -4881,6 +4886,10 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	dev = to_mdev(pd->device);

+	ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+	if (ts_format < 0)
+		return ts_format;
+
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
@@ -4890,6 +4899,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
 	MLX5_SET(rqc, rqc, mem_rq_type,
 		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+	MLX5_SET(rqc, rqc, ts_format, ts_format);
 	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
 	MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
...
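Note on the recurring ts_format change above: where the consumer did not ask for completion timestamps, the getters previously hard-coded the DEFAULT encoding; they now fall back to the free-running format whenever the device reports it as supported. A minimal sketch of the fallback, condensed from the hunks above (pick_ts_format is a hypothetical name; fr_supported stands for the MLX5_CAP_ROCE(dev->mdev, qp_ts_format) capability checks):

static int pick_ts_format(bool fr_supported)
{
	/* Free-running is the safe value on firmware without the
	 * qp_ts_format capability (see mlx5_get_qp_default_ts() added
	 * at the end of this series); DEFAULT is only used where the
	 * capability is advertised. */
	return fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
			      MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
}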
@@ -92,14 +92,15 @@ struct page_pool;
 	  MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
+#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
+#define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
  * WQEs, This page will absorb write overflow by the hardware, when
  * receiving packets larger than MTU. These oversize packets are
  * dropped by the driver at a later stage.
  */
-#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
-#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS	\
 	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
...
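Worked example of the new MTT macros (a sketch assuming 4K pages, where MLX5_MPWRQ_PAGES_PER_WQE evaluates to 64; ALIGN_EX stands in for the kernel's ALIGN):

#include <assert.h>

#define ALIGN_EX(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int mtts = ALIGN_EX(64 + 1, 8);	/* MLX5E_REQUIRED_WQE_MTTS == 72 */
	unsigned int octw = mtts / 2;			/* MLX5_MTT_OCTW(65) == 36 */

	/* 72 is not a power of two, so the removed MLX5E_LOG_ALIGNED_MPWQE_PPW
	 * (ilog2(72) == 6) made shift-based offsets step only 64 pages per
	 * WQE while each WQE really owns 72 MTT slots; the en_main.c hunk
	 * below replaces the shift with MLX5E_REQUIRED_MTTS(wqe_ix). */
	assert(mtts == 72 && octw == 36);
	return 0;
}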
@@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 	u16 vport_num;
 	int err = 0;

-	if (flow_attr->ip_version == 4) {
+	if (flow_attr->tun_ip_version == 4) {
 		/* Addresses are swapped for decap */
 		attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
 		attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
 		err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
 	}
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-	else if (flow_attr->ip_version == 6) {
+	else if (flow_attr->tun_ip_version == 6) {
 		/* Addresses are swapped for decap */
 		attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
 		attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
@@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 	esw_attr->rx_tun_attr->decap_vport = vport_num;

 out:
-	if (flow_attr->ip_version == 4)
+	if (flow_attr->tun_ip_version == 4)
 		mlx5e_route_lookup_ipv4_put(&attr);
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-	else if (flow_attr->ip_version == 6)
+	else if (flow_attr->tun_ip_version == 6)
 		mlx5e_route_lookup_ipv6_put(&attr);
 #endif
 	return err;
...
@@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
 	 * required to establish routing.
 	 */
 	flow_flag_set(flow, TUN_RX);
+	flow->attr->tun_ip_version = ip_version;
 	return 0;
 }

@@ -1091,7 +1092,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
 	if (err || !esw_attr->rx_tun_attr->decap_vport)
 		goto out;

-	key.ip_version = attr->ip_version;
+	key.ip_version = attr->tun_ip_version;
 	if (key.ip_version == 4)
 		key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
 	else
...
@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
 	option_key = (struct geneve_opt *)&enc_opts.key->data[0];
 	option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];

+	if (option_mask->opt_class == 0 && option_mask->type == 0 &&
+	    !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
+		return 0;
+
 	if (option_key->length > max_tlv_option_data_len) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Matching on GENEVE options: unsupported option len");
...
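The new early return treats an all-zero GENEVE option mask as "no option match requested". memchr_inv(buf, c, n) is the kernel helper used for that test: it returns NULL when every byte of buf equals c. A userspace analog, for illustration only:

#include <stddef.h>

static const void *memchr_inv_like(const void *p, int c, size_t n)
{
	const unsigned char *s = p;

	while (n--) {
		if (*s != (unsigned char)c)
			return s;	/* first byte that differs */
		s++;
	}
	return NULL;	/* all n bytes equal c, i.e. the mask is empty */
}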
@@ -2014,8 +2014,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	 */

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		struct mlx5e_params old_params;
+
+		old_params = priv->channels.params;
 		priv->channels.params = new_channels.params;
 		err = mlx5e_num_channels_changed(priv);
+		if (err)
+			priv->channels.params = old_params;
 		goto out;
 	}
...
@@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 			  rq->wqe_overflow.addr);
 }

-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
 {
-	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+	return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
 }

 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u32 byte_count =
 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
+			u64 dma_offset = mlx5e_get_mpwqe_offset(i);

 			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
 			wqe->data[0].byte_count = cpu_to_be32(byte_count);
@@ -2368,8 +2368,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
 {
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return order_base_2(MLX5E_UMR_WQEBBS) +
-			mlx5e_get_rq_log_wq_sz(rqp->rqc);
+		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+			     order_base_2(MLX5E_UMR_WQEBBS) +
+			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 	}
@@ -2502,8 +2503,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
 	int i;

-	if (chs->port_ptp)
+	if (chs->port_ptp) {
 		mlx5e_port_ptp_close(chs->port_ptp);
+		chs->port_ptp = NULL;
+	}

 	for (i = 0; i < chs->num; i++)
 		mlx5e_close_channel(chs->c[i]);
@@ -3810,6 +3813,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 		for (j = 0; j < priv->max_opened_tc; j++) {
 			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

+			s->tx_packets    += sq_stats->packets;
+			s->tx_bytes      += sq_stats->bytes;
+			s->tx_dropped    += sq_stats->dropped;
+		}
+	}
+
+	if (priv->port_ptp_opened) {
+		for (i = 0; i < priv->max_opened_tc; i++) {
+			struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+
 			s->tx_packets    += sq_stats->packets;
 			s->tx_bytes      += sq_stats->bytes;
 			s->tx_dropped    += sq_stats->dropped;
@@ -4683,8 +4695,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 		struct mlx5e_channel *c = priv->channels.c[i];

 		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
-		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
+			bpf_prog_inc(prog);
 			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
+		}
 	}

 unlock:
@@ -5474,8 +5488,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
 		    struct net_device *netdev,
 		    struct mlx5_core_dev *mdev)
 {
-	memset(priv, 0, sizeof(*priv));
-
 	/* priv init */
 	priv->mdev = mdev;
 	priv->netdev = netdev;
@@ -5508,12 +5520,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 {
 	int i;

+	/* bail if change profile failed and also rollback failed */
+	if (!priv->mdev)
+		return;
+
 	destroy_workqueue(priv->wq);
 	free_cpumask_var(priv->scratchpad.cpumask);

 	for (i = 0; i < priv->htb.max_qos_sqs; i++)
 		kfree(priv->htb.qos_sq_stats[i]);
 	kvfree(priv->htb.qos_sq_stats);
+
+	memset(priv, 0, sizeof(*priv));
 }

 struct net_device *
@@ -5630,11 +5648,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 }

 static int
-mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
 			    const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-	struct net_device *netdev = priv->netdev;
-	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;

 	err = mlx5e_priv_init(priv, netdev, mdev);
@@ -5647,10 +5664,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
 	priv->ppriv = new_ppriv;
 	err = new_profile->init(priv->mdev, priv->netdev);
 	if (err)
-		return err;
+		goto priv_cleanup;
 	err = mlx5e_attach_netdev(priv);
 	if (err)
-		new_profile->cleanup(priv);
+		goto profile_cleanup;
+	return err;
+
+profile_cleanup:
+	new_profile->cleanup(priv);
+priv_cleanup:
+	mlx5e_priv_cleanup(priv);
 	return err;
 }

@@ -5659,13 +5682,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 {
 	unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
 	const struct mlx5e_profile *orig_profile = priv->profile;
+	struct net_device *netdev = priv->netdev;
+	struct mlx5_core_dev *mdev = priv->mdev;
 	void *orig_ppriv = priv->ppriv;
 	int err, rollback_err;

 	/* sanity */
 	if (new_max_nch != priv->max_nch) {
-		netdev_warn(priv->netdev,
-			    "%s: Replacing profile with different max channels\n",
+		netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
 			    __func__);
 		return -EINVAL;
 	}
@@ -5675,22 +5699,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 	priv->profile->cleanup(priv);
 	mlx5e_priv_cleanup(priv);

-	err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+	err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
 	if (err) { /* roll back to original profile */
-		netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
-			    __func__, err);
+		netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
 		goto rollback;
 	}

 	return 0;

 rollback:
-	rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
-	if (rollback_err) {
-		netdev_err(priv->netdev,
-			   "%s: failed to rollback to orig profile, %d\n",
+	rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
+	if (rollback_err)
+		netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
 			   __func__, rollback_err);
-	}
 	return err;
 }
...
@@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	struct mlx5e_icosq *sq = rq->icosq;
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_umr_wqe *umr_wqe;
-	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
 	u16 pi;
 	int err;
 	int i;
@@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	umr_wqe->ctrl.opmod_idx_opcode =
 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
 			    MLX5_OPCODE_UMR);
-	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+	umr_wqe->uctrl.xlt_offset =
+		cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));

 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
 		.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
...
@@ -4445,7 +4445,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
 	 */
 	if (rate) {
 		rate = (rate * BITS_PER_BYTE) + 500000;
-		rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+		do_div(rate, 1000000);
+		rate_mbps = max_t(u32, rate, 1);
 	}

 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
...
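The replaced line misused do_div(): the macro divides its 64-bit first argument in place and returns the remainder, so the old code fed the remainder (up to 999999) into max_t() instead of the Mbps quotient. A userspace sketch of the semantics (do_div_like stands in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

static uint32_t do_div_like(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;	/* quotient left in *n, as with kernel do_div() */
	return rem;
}

int main(void)
{
	uint64_t rate = 100000000ULL * 8 + 500000;	/* 100 MB/s as bits, plus rounding */
	uint32_t rem = do_div_like(&rate, 1000000);

	/* old: rate_mbps = max(rem, 1)   -> 500000 (remainder, wrong)
	 * new: rate_mbps = max(rate, 1)  -> 800 Mbps (quotient, intended) */
	printf("quotient=%llu remainder=%u\n", (unsigned long long)rate, rem);
	return 0;
}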
@@ -79,6 +79,7 @@ struct mlx5_flow_attr {
 	u8 inner_match_level;
 	u8 outer_match_level;
 	u8 ip_version;
+	u8 tun_ip_version;
 	u32 flags;
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
...
@@ -551,7 +551,8 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
 	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(esw))
+	    mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
 		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

 	if (attr->dest_ft) {
...
@@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
 	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
 	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
...
@@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
 	}

 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
@@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 static void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5i_priv *ipriv = priv->ppriv;
 	const struct mlx5e_profile *profile = priv->profile;

@@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
 	if (!ipriv->sub_interface) {
 		mlx5i_pkey_qpn_ht_cleanup(netdev);
-		mlx5e_destroy_mdev_resources(priv->mdev);
+		mlx5e_destroy_mdev_resources(mdev);
 	}
 }
...
@@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 		return -EINVAL;

 	field_select = MLX5_MTPPS_FS_ENABLE;
+	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+	if (pin < 0)
+		return -EBUSY;
+
 	if (on) {
 		bool rt_mode = mlx5_real_time_mode(mdev);
 		u32 nsec;
 		s64 sec;

-		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
-		if (pin < 0)
-			return -EBUSY;
-
 		pin_mode = MLX5_PIN_MODE_OUT;
 		pattern = MLX5_OUT_PATTERN_PERIODIC;
 		ts.tv_sec = rq->perout.period.sec;
...
@@ -64,7 +64,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	}
 	if (sw_id == -ENOSPC) {
 		err = -ENOSPC;
-		goto err;
+		goto exist_err;
 	}

 	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
...
@@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits {

 	u8 sw_function_id[0x20];

-	u8 reserved_at_40[0x80];
+	u8 reserved_at_40[0x40];
 };

 struct mlx5_ifc_query_vhca_state_out_bits {
...
@@ -94,6 +94,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
 	struct mlx5_core_dev *dev = notifier->dev;

 	mlx5_vhca_event_notify(dev, &work->event);
+	kfree(work);
 }

 static int
...
@@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
 	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
 	MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
...
@@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
 {
 	u64 index =
-		(MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
-		 MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);

 	return index << 6;
 }
...
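The casts fix an integer-width bug: MLX5_GET() returns a 32-bit value, so shifting the miss_address_39_32 field left by 26 in 32-bit arithmetic discarded its top two bits before the result was widened to u64. A standalone sketch:

#include <assert.h>
#include <stdint.h>

/* lo is the 26-bit miss_address_31_6 field, hi the 8-bit
 * miss_address_39_32 field, as MLX5_GET() would return them */
static uint64_t combine(uint32_t lo, uint32_t hi, int fixed)
{
	if (!fixed)
		return (uint64_t)(lo | hi << 26);	/* 32-bit math: hi bits 6-7 lost */
	return (uint64_t)lo | ((uint64_t)hi) << 26;	/* widen first, as in the fix */
}

int main(void)
{
	/* any hi value >= 0x40 exercises the lost bits */
	assert(combine(0, 0xff, 1) == 0x3fc000000ULL);
	assert(combine(0, 0xff, 0) != combine(0, 0xff, 1));
	return 0;
}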
@@ -547,4 +547,11 @@ static inline const char *mlx5_qp_state_str(int state)
 	}
 }

+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+	return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+	       MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+	       MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
 #endif /* MLX5_QP_H */
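Usage note: the QP-creation hunks earlier in this merge (RDMA, FPGA conn, IPoIB underlay, SW steering) all consume this helper the same way:

	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));

The inverted capability test is the point: when the firmware does not advertise qp_ts_format, the helper returns the free-running encoding, presumably the only value such firmware accepts, and only devices that do advertise the capability are left on TIMESTAMP_FORMAT_DEFAULT.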