Commit 114a5c32 authored by David S. Miller

Merge tag 'mlx5-fixes-2019-07-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-07-11

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v4.15
('net/mlx5e: IPoIB, Add error path in mlx5_rdma_setup_rn')

For -stable v5.1
('net/mlx5e: Fix port tunnel GRE entropy control')
('net/mlx5e: Rx, Fix checksum calculation for new hardware')
('net/mlx5e: Fix return value from timeout recover function')
('net/mlx5e: Fix error flow in tx reporter diagnose')

For -stable v5.2
('net/mlx5: E-Switch, Fix default encap mode')

Conflict note: This pull request will produce a small conflict when
merged with net-next.
In drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c, take
the hunk from net and replace:
	esw_offloads_steering_init(esw, vf_nvports, total_nvports);
with:
	esw_offloads_steering_init(esw);
====================
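For reference, a minimal sketch of how esw_offloads_init() should read
after resolving the conflict, assuming the surrounding context from the
eswitch_offloads.c hunk below (only the steering-init call differs
between the trees):

	int esw_offloads_init(struct mlx5_eswitch *esw)
	{
		int err;

		/* encap-mode initialization from the hunk in this series */
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
		    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
			esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
		else
			esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

		/* net-next signature: takes only esw */
		err = esw_offloads_steering_init(esw);
		if (err)
			return err;

		/* ... remainder of the function unchanged ... */
		return 0;
	}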
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 08d14c49 ef1ce7d7
@@ -305,6 +305,7 @@ enum {
 	MLX5E_RQ_STATE_ENABLED,
 	MLX5E_RQ_STATE_AM,
 	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
+	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
 };
 
 struct mlx5e_cq {
...
@@ -142,22 +142,20 @@ static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
 {
 	struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
 	u32 eqe_count;
-	int ret;
 
 	netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
 		   eq->core.eqn, eq->core.cons_index, eq->core.irqn);
 
 	eqe_count = mlx5_eq_poll_irq_disabled(eq);
-	ret = eqe_count ? false : true;
 	if (!eqe_count) {
 		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-		return ret;
+		return -EIO;
 	}
 
 	netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
 		   eqe_count, eq->core.eqn);
 	sq->channel->stats->eq_rearm++;
-	return ret;
+	return 0;
 }
 
 int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
@@ -264,13 +262,13 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
 
 		err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
 		if (err)
-			break;
+			goto unlock;
 
 		err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
 							      state,
 							      netif_xmit_stopped(sq->txq));
 		if (err)
-			break;
+			goto unlock;
 	}
 	err = devlink_fmsg_arr_pair_nest_end(fmsg);
 	if (err)
...
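A note on the recover fix above: a devlink health recover callback
returns 0 on success or a negative errno on failure, which the old
bool-style value (ret = eqe_count ? false : true) did not satisfy.
A minimal sketch of that contract, with hypothetical names
(example_sq and device_stalled() are illustrative, not mlx5 APIs):

	/* Return contract for a recover handler: 0 if the device was
	 * recovered, a negative errno if recovery failed and the
	 * health reporter should stay in the error state.
	 */
	static int example_timeout_recover(struct example_sq *sq)
	{
		if (device_stalled(sq))	/* hypothetical check */
			return -EIO;	/* recovery failed */
		return 0;		/* recovery succeeded */
	}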
@@ -889,6 +889,9 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	if (err)
 		goto err_destroy_rq;
 
+	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
+		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
+
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
...
@@ -923,8 +923,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
 			goto csum_unnecessary;
 
+		stats->csum_complete++;
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+
+		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
+			return; /* CQE csum covers all received bytes */
+
+		/* csum might need some fixups ...*/
 		if (network_depth > ETH_HLEN)
 			/* CQE csum is calculated from the IP header and does
 			 * not cover VLAN headers (if present). This will add
@@ -935,7 +941,6 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 					 skb->csum);
 
 		mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
-		stats->csum_complete++;
 		return;
 	}
 
...
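The fixup path that the new early return skips compensates for bytes
the CQE checksum does not cover. A sketch of the VLAN adjustment,
assuming it mirrors the context lines elided from the hunk above (the
comment visible in the hunk describes exactly this case):

	/* The CQE csum starts at the IP header, so add back any
	 * VLAN-tag bytes between the Ethernet header and the IP
	 * header; otherwise CHECKSUM_COMPLETE would not cover the
	 * whole packet.
	 */
	if (network_depth > ETH_HLEN)
		skb->csum = csum_partial(skb->data + ETH_HLEN,
					 network_depth - ETH_HLEN,
					 skb->csum);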
@@ -1946,11 +1946,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->enabled_vports = 0;
 	esw->mode = MLX5_ESWITCH_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
-	else
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
...
@@ -2131,6 +2131,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 {
 	int err;
 
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+	else
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+
 	err = esw_offloads_steering_init(esw);
 	if (err)
 		return err;
@@ -2187,6 +2193,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 		mlx5_eswitch_disable_passing_vport_metadata(esw);
 	esw_offloads_steering_cleanup(esw);
+	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 
 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
...
@@ -711,7 +711,9 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
 
 	prof->init(mdev, netdev, prof, ipriv);
 
-	mlx5e_attach_netdev(epriv);
+	err = mlx5e_attach_netdev(epriv);
+	if (err)
+		goto detach;
 	netif_carrier_off(netdev);
 
 	/* set rdma_netdev func pointers */
@@ -727,6 +729,11 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
 
 	return 0;
 
+detach:
+	prof->cleanup(epriv);
+	if (ipriv->sub_interface)
+		return err;
+	mlx5e_destroy_mdev_resources(mdev);
 destroy_ht:
 	mlx5i_pkey_qpn_ht_cleanup(netdev);
 	return err;
...
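On the IPoIB error path above, the unwind order matters:
prof->cleanup() undoes prof->init(), and
mlx5e_destroy_mdev_resources() is skipped for sub-interfaces, which
(as the check suggests) share the parent's mdev resources rather than
owning them. Condensed from the two hunks, with the new flow annotated:

	err = mlx5e_attach_netdev(epriv);
	if (err)
		goto detach;		/* new: unwind instead of ignoring err */
	...
	detach:
		prof->cleanup(epriv);	/* undo prof->init() */
		if (ipriv->sub_interface)
			return err;	/* parent owns the mdev resources */
		mlx5e_destroy_mdev_resources(mdev);
	destroy_ht:
		mlx5i_pkey_qpn_ht_cleanup(netdev);
		return err;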
@@ -98,27 +98,12 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
 	 */
 	if (entropy_flags.gre_calc_supported &&
 	    reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
-		/* Other applications may change the global FW entropy
-		 * calculations settings. Check that the current entropy value
-		 * is the negative of the updated value.
-		 */
-		if (entropy_flags.force_enabled &&
-		    enable == entropy_flags.gre_calc_enabled) {
-			mlx5_core_warn(tun_entropy->mdev,
-				       "Unexpected GRE entropy calc setting - expected %d",
-				       !entropy_flags.gre_calc_enabled);
-			return -EOPNOTSUPP;
-		}
-		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable,
-							 entropy_flags.force_supported);
+		if (!entropy_flags.force_supported)
+			return 0;
+		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev,
+							 enable, !enable);
 		if (err)
 			return err;
-		/* if we turn on the entropy we don't need to force it anymore */
-		if (entropy_flags.force_supported && enable) {
-			err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0);
-			if (err)
-				return err;
-		}
 	} else if (entropy_flags.calc_supported) {
 		/* Other applications may change the global FW entropy
 		 * calculations settings. Check that the current entropy value
...
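The rewritten GRE branch above reduces to a simple rule for
mlx5_set_port_gre_tun_entropy_calc(mdev, enable, force): bail out
(return 0) when the device cannot force the setting at all, and
otherwise pass force = !enable. The two reachable cases:

	/* enable=1 -> (enable=1, force=0): turn entropy on but leave
	 *             it overridable, so other users may change it;
	 * enable=0 -> (enable=0, force=1): turn entropy off and pin it.
	 */
	err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev,
						 enable, !enable);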
@@ -805,7 +805,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         swp[0x1];
 	u8         swp_csum[0x1];
 	u8         swp_lso[0x1];
-	u8         reserved_at_23[0xd];
+	u8         cqe_checksum_full[0x1];
+	u8         reserved_at_24[0xc];
 	u8         max_vxlan_udp_ports[0x8];
 	u8         reserved_at_38[0x6];
 	u8         max_geneve_opt_len[0x1];
...
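Bit accounting for the mlx5_ifc change above: the old 0xd-bit reserved
field at offset 0x23 becomes a 1-bit cqe_checksum_full capability at
0x23 plus a 0xc-bit reserved field at 0x24. Since 0x1 + 0xc = 0xd,
every subsequent field keeps its offset; max_vxlan_udp_ports still
starts at bit 0x30 (0x23 + 0xd).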