Commit fcd1a530 authored by David S. Miller

Merge tag 'mlx5-updates-2021-06-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
This series provides misc updates for mlx5 drivers.
For more information please see tag log below.

Please pull and let me know if there is any problem.

mlx5-updates-2021-06-03

This series contains misc updates for the mlx5 driver:

1) Alaa disables advanced features when running in kdump mode to save memory
2) Jakub counts all link flap events (see the usage sketch below)
3) Meir adds support for IPoIB NDR speed
4) Various misc cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6a8dd8b2 f68406ca
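The netif_carrier_event() helper added at the bottom of this diff lets a driver that queries link state asynchronously record flaps that recover before the query runs: it bumps both carrier change counters and fires a linkwatch event without toggling the carrier state. Below is a minimal usage sketch, modeled on the mlx5e_update_carrier() change in this series; my_query_link_up() and my_update_carrier() are hypothetical names, not part of this patch set.

#include <linux/netdevice.h>

/* Hypothetical placeholder: replace with the driver's own port-state query. */
static bool my_query_link_up(struct net_device *netdev)
{
	return true; /* placeholder */
}

static void my_update_carrier(struct net_device *netdev)
{
	bool up = my_query_link_up(netdev);

	/* Queried state matches what the stack already reports: the link may
	 * have flapped and recovered between queries, so record the event
	 * without changing the carrier state.
	 */
	if (up == netif_carrier_ok(netdev))
		netif_carrier_event(netdev);

	if (up)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

netif_carrier_event() increments both counters because a missed flap implies both a down and an up transition; these counters are what userspace reads as the link flap count (e.g. via the carrier_up_count/carrier_down_count sysfs attributes).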
......@@ -974,7 +974,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
- void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
......@@ -1163,6 +1162,13 @@ mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
}
+ static inline bool
+ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
+ {
+ return !is_kdump_kernel() &&
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
+ }
int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
......
......@@ -201,7 +201,7 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
......@@ -214,7 +214,7 @@ static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
......@@ -614,7 +614,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
......@@ -643,7 +643,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- param->is_tls = mlx5_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
......
......@@ -162,7 +162,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
- static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
......@@ -175,8 +175,6 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
- return true;
}
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
......
......@@ -59,12 +59,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (mlx5_accel_is_ktls_tx(mdev)) {
+ if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ return;
+ if (mlx5e_accel_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
......@@ -89,7 +92,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
......@@ -109,7 +112,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
......
......@@ -15,6 +15,25 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
+ static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+ {
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_tx(mdev);
+ }
+ static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+ {
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_rx(mdev);
+ }
+ static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
+ {
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(mdev);
+ }
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
......@@ -44,6 +63,11 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
+ static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
+ static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
+ static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
#endif
#endif /* __MLX5E_TLS_H__ */
......@@ -23,10 +23,13 @@ mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
- u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
+ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
u16 num_dumps, stop_room = 0;
+ if (!mlx5e_accel_is_ktls_tx(mdev))
+ return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
......
......@@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state {
u32 tls_tisn;
};
- u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
+ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen,
......
......@@ -192,13 +192,13 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
u32 caps;
- if (mlx5_accel_is_ktls_device(priv->mdev)) {
+ if (mlx5e_accel_is_ktls_device(priv->mdev)) {
mlx5e_ktls_build_netdev(priv);
return;
}
/* FPGA */
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return;
caps = mlx5_accel_tls_device_caps(priv->mdev);
......@@ -224,7 +224,7 @@ int mlx5e_tls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls;
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
......
......@@ -103,11 +103,18 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+ static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
+ {
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_tls_device(mdev);
+ }
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (!is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(priv->mdev))
mlx5e_ktls_build_netdev(priv);
}
......@@ -117,6 +124,7 @@ static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+ static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
#endif
......
......@@ -273,7 +273,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
- if (mlx5_accel_is_ktls_tx(sq->mdev))
+ if (mlx5e_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
......@@ -378,11 +378,11 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5_accel_is_tls_device(mdev))
+ if (!mlx5e_accel_is_tls_device(mdev))
return 0;
- if (mlx5_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(params);
+ if (mlx5e_accel_is_ktls_device(mdev))
+ return mlx5e_ktls_get_stop_room(mdev, params);
/* FPGA */
/* Resync SKB. */
......
......@@ -58,7 +58,7 @@ static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
if (!priv->tls)
return NULL;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return mlx5e_ktls_sw_stats_desc;
return mlx5e_tls_sw_stats_desc;
}
......@@ -67,7 +67,7 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
......
......@@ -1984,7 +1984,7 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
- if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ if (enable && !mlx5e_tx_mpwqe_supported(mdev))
return -EOPNOTSUPP;
new_params = priv->channels.params;
......
......@@ -91,12 +91,16 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
+ bool up;
port_state = mlx5_query_vport_state(mdev,
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
- if (port_state == VPORT_STATE_UP) {
+ up = port_state == VPORT_STATE_UP;
+ if (up == netif_carrier_ok(priv->netdev))
+ netif_carrier_event(priv->netdev);
+ if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
} else {
......@@ -853,7 +857,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev))
+ if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
......@@ -4616,12 +4620,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* XDP SQ */
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* set CQE compression */
params->rx_cqe_compress_def = false;
......
......@@ -579,6 +579,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
return false;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
do {
u16 head = mlx5_wq_cyc_get_head(wq);
......@@ -734,6 +737,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (likely(missing < UMR_WQE_BULK))
return false;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
......@@ -1555,9 +1561,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (rq->page_pool)
- page_pool_nid_changed(rq->page_pool, numa_mem_id());
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
if (work_done >= budget)
......
......@@ -706,16 +706,12 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
- static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
- return false;
+ mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
- return true;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
......@@ -744,10 +740,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
- attr.ihs)))
- return NETDEV_TX_OK;
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
return NETDEV_TX_OK;
}
......@@ -762,9 +755,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
- return NETDEV_TX_OK;
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
return NETDEV_TX_OK;
......
......@@ -27,6 +27,8 @@ int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
int i;
ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
+ if (!ft_pool)
+ return -ENOMEM;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
......
......@@ -150,6 +150,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_FDR = 1 << 4,
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
+ MLX5_PTYS_RATE_NDR = 1 << 7,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
......@@ -162,6 +163,7 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_FDR: return 14000;
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
+ case MLX5_PTYS_RATE_NDR: return 100000;
default: return -1;
}
}
......
......@@ -6,7 +6,6 @@
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
#include "ecpf.h"
#include "vhca_event.h"
#include "mlx5_core.h"
#include "eswitch.h"
......
......@@ -4187,8 +4187,8 @@ unsigned long dev_trans_start(struct net_device *dev);
void __netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
+ void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
......
......@@ -540,6 +540,24 @@ void netif_carrier_off(struct net_device *dev)
}
EXPORT_SYMBOL(netif_carrier_off);
+ /**
+ * netif_carrier_event - report carrier state event
+ * @dev: network device
+ *
+ * Device has detected a carrier event but the carrier state wasn't changed.
+ * Use in drivers when querying carrier state asynchronously, to avoid missing
+ * events (link flaps) if link recovers before it's queried.
+ */
+ void netif_carrier_event(struct net_device *dev)
+ {
+ if (dev->reg_state == NETREG_UNINITIALIZED)
+ return;
+ atomic_inc(&dev->carrier_up_count);
+ atomic_inc(&dev->carrier_down_count);
+ linkwatch_fire_event(dev);
+ }
+ EXPORT_SYMBOL_GPL(netif_carrier_event);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
......