Commit 03e481e8 authored by David S. Miller

Merge tag 'mlx5-updates-2021-04-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-16

This patchset introduces updates to the mlx5e netdev driver.

1) Tariq refactors TLS offloads and adds resiliency against RX resync
   failures; a sketch of the resulting queue-and-drain flow follows the
   commit header below

2) Maxim reduces code duplication by unifying the channel reset flow,
   regardless of whether channels are closed or open

3) Aya enhances the TX/RX health reporters' diagnostics to expose the
   internal clock time-stamping format

4) Moshe adds support for ethtool extended link state, to show the reason
   why the link is down
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 70c18375 95742c1c
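
As a reading aid for item 1: the series moves kTLS RX resync responses off
the CQE path onto a per-SQ list (sq->ktls_resync), marks the SQ with
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, and drains up to a NAPI budget's
worth of entries per poll cycle, re-queueing whatever the ICOSQ cannot
absorb. Below is a minimal userspace C model of that queue-and-drain
pattern. It is a sketch only: resync_req, resync_list, and the pthread
mutex are illustrative stand-ins, not the driver's API (the real code uses
struct list_head, spin_lock_bh(), the async ICOSQ doorbell, and
mlx5e_trigger_irq()).

/* Userspace model of the deferred kTLS RX resync flow. Illustrative
 * only; not the driver's code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct resync_req {			/* stands in for priv_rx */
	int id;
	struct resync_req *next;
};

struct resync_list {
	pthread_mutex_t lock;		/* models ktls_resync->lock */
	struct resync_req *head, *tail;
	bool pending;			/* models the PENDING_TLS_RX_RESYNC bit */
};

/* Producer: queue a response and request a poll cycle only on the
 * empty-to-non-empty transition, mirroring resync_handle_seq_match(). */
static void resync_queue(struct resync_list *rl, struct resync_req *req)
{
	bool trigger_poll;

	pthread_mutex_lock(&rl->lock);
	req->next = NULL;
	if (rl->tail)
		rl->tail->next = req;
	else
		rl->head = req;
	rl->tail = req;
	trigger_poll = !rl->pending;
	rl->pending = true;
	pthread_mutex_unlock(&rl->lock);

	if (trigger_poll)
		printf("trigger poll (models mlx5e_trigger_irq())\n");
}

/* Poll side: drain at most @budget entries, mirroring
 * mlx5e_ktls_rx_handle_resync_list(); returns true while busy. */
static bool resync_poll(struct resync_list *rl, int budget)
{
	int done = 0;

	pthread_mutex_lock(&rl->lock);
	while (rl->head && done < budget) {
		struct resync_req *req = rl->head;

		rl->head = req->next;
		if (!rl->head)
			rl->tail = NULL;
		printf("post static params for req %d\n", req->id);
		free(req);
		done++;
	}
	if (!rl->head)
		rl->pending = false;	/* list empty: clear the pending bit */
	pthread_mutex_unlock(&rl->lock);

	return done == budget;		/* budget exhausted: poll again */
}

int main(void)
{
	struct resync_list rl = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	for (i = 0; i < 3; i++) {
		struct resync_req *req = calloc(1, sizeof(*req));

		if (!req)
			break;
		req->id = i;
		resync_queue(&rl, req);
	}
	while (resync_poll(&rl, 2))
		;			/* drains 2, then 1, then stops */
	return 0;
}

Under these assumptions the model keeps the two properties the driver
relies on: the pending flag is set while work is queued and cleared once
the list drains empty, and a poll cycle is requested only when the flag
flips from clear to set, so NAPI is never scheduled redundantly.
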
@@ -325,9 +325,9 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
};
struct mlx5e_tx_mpwqe {
@@ -500,6 +500,8 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
/* data path */
u16 cc;
@@ -519,6 +521,7 @@ struct mlx5e_icosq {
u32 sqn;
u16 reserved_room;
unsigned long state;
struct mlx5e_ktls_resync_resp *ktls_resync;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -1015,10 +1018,10 @@ int fn##_ctx(struct mlx5e_priv *priv, void *context) \
return fn(priv); \
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context);
int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
struct mlx5e_params *new_params,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
......
@@ -621,6 +621,9 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
param->is_tls = mlx5_accel_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
......
@@ -30,6 +30,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
u16 stop_room;
};
......
@@ -792,6 +792,9 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
if (!priv->profile->rx_ptp_support)
return 0;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
if (set) {
if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
......
@@ -323,10 +323,12 @@ static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_params *params;
u32 rq_stride, rq_sz;
bool real_time;
int err;
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
real_time = mlx5_is_real_time_rq(priv->mdev);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
@@ -345,6 +347,10 @@ static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
if (err)
return err;
err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
if (err)
return err;
err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
if (err)
return err;
......
@@ -257,12 +257,14 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
u32 sq_stride, sq_sz;
bool real_time;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
if (err)
return err;
real_time = mlx5_is_real_time_sq(txqsq->mdev);
sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
sq_stride = MLX5_SEND_WQE_BB;
@@ -274,6 +276,10 @@ mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
if (err)
return err;
err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
if (err)
return err;
err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
if (err)
return err;
......
@@ -123,11 +123,10 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
/* May send SKBs and WQEs. */
if (mlx5e_tls_skb_offloaded(skb))
if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
return false;
}
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
@@ -186,7 +185,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
......
@@ -12,6 +12,9 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -33,6 +36,14 @@ static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enab
return -EOPNOTSUPP;
}
static inline struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
#endif
#endif /* __MLX5E_TLS_H__ */
@@ -56,6 +56,7 @@ struct mlx5e_ktls_offload_context_rx {
/* resync */
struct mlx5e_ktls_rx_resync_ctx resync;
struct list_head list;
};
static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
@@ -72,6 +73,32 @@ static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx
refcount_inc(&priv_rx->resync.refcnt);
}
struct mlx5e_ktls_resync_resp {
/* protects list changes */
spinlock_t lock;
struct list_head list;
};
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
kvfree(resp_list);
}
struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
struct mlx5e_ktls_resync_resp *resp_list;
resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
if (!resp_list)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&resp_list->list);
spin_lock_init(&resp_list->lock);
return resp_list;
}
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
int err, inlen;
@@ -119,8 +146,7 @@ static void accel_rule_handle_work(struct work_struct *work)
complete(&priv_rx->add_ctx);
}
static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
struct sock *sk)
static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
INIT_WORK(&rule->work, accel_rule_handle_work);
rule->priv = priv;
@@ -359,33 +385,32 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
/* Function can be called with the refcount being either elevated or not.
* It does not affect the refcount.
*/
static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
{
struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
int err;
bool trigger_poll;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
err = 0;
sq = &c->async_icosq;
spin_lock_bh(&c->async_icosq_lock);
ktls_resync = sq->ktls_resync;
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
priv_rx->rq_stats->tls_resync_res_skip++;
err = PTR_ERR(cseg);
goto unlock;
}
/* Do not increment priv_rx refcnt, CQE handling is empty */
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
priv_rx->rq_stats->tls_resync_res_ok++;
unlock:
spin_unlock_bh(&c->async_icosq_lock);
spin_lock_bh(&ktls_resync->lock);
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock_bh(&ktls_resync->lock);
return err;
if (!trigger_poll)
return;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(sq);
spin_unlock_bh(&c->async_icosq_lock);
}
}
/* Function can be called with the refcount being either elevated or not.
@@ -618,7 +643,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
init_completion(&priv_rx->add_ctx);
accel_rule_init(&priv_rx->rule, priv, sk);
accel_rule_init(&priv_rx->rule, priv);
resync = &priv_rx->resync;
resync_init(resync, priv);
tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
@@ -676,3 +701,65 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
*/
mlx5e_ktls_priv_rx_put(priv_rx);
}
bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5_wqe_ctrl_seg *db_cseg;
struct mlx5e_icosq *sq;
LIST_HEAD(local_list);
int i, j;
sq = &c->async_icosq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
ktls_resync = sq->ktls_resync;
db_cseg = NULL;
i = 0;
spin_lock(&ktls_resync->lock);
list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
list_move(&priv_rx->list, &local_list);
if (++i == budget)
break;
}
if (list_empty(&ktls_resync->list))
clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock(&ktls_resync->lock);
spin_lock(&c->async_icosq_lock);
for (j = 0; j < i; j++) {
struct mlx5_wqe_ctrl_seg *cseg;
priv_rx = list_first_entry(&local_list,
struct mlx5e_ktls_offload_context_rx,
list);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
break;
list_del(&priv_rx->list);
db_cseg = cseg;
}
if (db_cseg)
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
spin_unlock(&c->async_icosq_lock);
priv_rx->rq_stats->tls_resync_res_ok += j;
if (!list_empty(&local_list)) {
/* This happens only if ICOSQ is full.
* There is no need to mark busy or explicitly ask for a NAPI cycle,
* it will be triggered by the outstanding ICOSQ completions.
*/
spin_lock(&ktls_resync->lock);
list_splice(&local_list, &ktls_resync->list);
set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
spin_unlock(&ktls_resync->lock);
priv_rx->rq_stats->tls_resync_res_retry++;
}
return i == budget;
}
@@ -40,6 +40,14 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
}
return false;
}
bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget);
static inline bool
mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -49,6 +57,18 @@ mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
return false;
}
static inline bool
mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
return false;
}
static inline bool
mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return false;
}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
@@ -263,9 +263,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 skb_seq;
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return true;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
return true;
@@ -301,12 +298,6 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
return false;
}
void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
{
cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
......
@@ -47,8 +47,18 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state);
static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
{
return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}
static inline void
mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
{
cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
}
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt);
......
@@ -1149,35 +1149,23 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
bool reset_channels = true;
bool opened;
int err = 0;
struct mlx5e_params new_params;
bool reset = true;
int err;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
new_params = priv->channels.params;
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
trust_state);
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!opened)
reset_channels = false;
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
reset_channels = false;
if (reset_channels) {
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_update_trust_state_hw,
&trust_state);
} else {
err = mlx5e_update_trust_state_hw(priv, &trust_state);
if (!err && !opened)
priv->channels.params = new_channels.params;
}
if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
reset = false;
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_update_trust_state_hw,
&trust_state, reset);
mutex_unlock(&priv->state_lock);
......
@@ -184,6 +184,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
@@ -344,6 +345,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
s->rx_tls_err += rq_stats->tls_err;
#endif
@@ -1654,6 +1656,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
......
@@ -209,6 +209,7 @@ struct mlx5e_sw_stats {
u64 rx_tls_resync_req_end;
u64 rx_tls_resync_req_skip;
u64 rx_tls_resync_res_ok;
u64 rx_tls_resync_res_retry;
u64 rx_tls_resync_res_skip;
u64 rx_tls_err;
#endif
@@ -339,6 +340,7 @@ struct mlx5e_rq_stats {
u64 tls_resync_req_end;
u64 tls_resync_req_skip;
u64 tls_resync_res_ok;
u64 tls_resync_res_retry;
u64 tls_resync_res_skip;
u64 tls_err;
#endif
......
@@ -36,6 +36,7 @@
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -171,6 +172,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
*/
clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
/* Keep after async ICOSQ CQ poll */
if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
......
@@ -497,13 +497,13 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
GFP_KERNEL);
bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
GFP_KERNEL);
bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
GFP_KERNEL);
if (!bulk->bitmask)
goto err_alloc_bitmask;
@@ -521,9 +521,9 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
return bulk;
err_mlx5_cmd_bulk_alloc:
kfree(bulk->bitmask);
kvfree(bulk->bitmask);
err_alloc_bitmask:
kfree(bulk);
kvfree(bulk);
err_alloc_bulk:
return ERR_PTR(err);
}
@@ -537,8 +537,8 @@ mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
}
mlx5_cmd_fc_free(dev, bulk->base_id);
kfree(bulk->bitmask);
kfree(bulk);
kvfree(bulk->bitmask);
kvfree(bulk);
return 0;
}
......
@@ -481,28 +481,19 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
struct mlx5e_channels new_channels = {};
struct mlx5e_params *params;
struct mlx5e_params new_params;
int err = 0;
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
new_params = priv->channels.params;
new_params.sw_mtu = new_mtu;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
params->sw_mtu = new_mtu;
netdev->mtu = params->sw_mtu;
goto out;
}
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
if (err)
goto out;
netdev->mtu = new_channels.params.sw_mtu;
netdev->mtu = new_params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
......
@@ -645,16 +645,19 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
return PTP_PF_NONE;
}
static int mlx5_init_pin_config(struct mlx5_clock *clock)
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
if (!clock->ptp_info.n_pins)
return;
clock->ptp_info.pin_config =
kcalloc(clock->ptp_info.n_pins,
sizeof(*clock->ptp_info.pin_config),
GFP_KERNEL);
if (!clock->ptp_info.pin_config)
return -ENOMEM;
return;
clock->ptp_info.enable = mlx5_ptp_enable;
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
@@ -667,8 +670,6 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
}
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
@@ -859,6 +860,17 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
}
}
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
if (!MLX5_PPS_CAP(mdev))
return;
mlx5_get_pps_caps(mdev);
mlx5_init_pin_config(clock);
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
@@ -876,10 +888,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->ptp_info = mlx5_ptp_clock_info;
/* Initialize 1PPS data structures */
if (MLX5_PPS_CAP(mdev))
mlx5_get_pps_caps(mdev);
if (clock->ptp_info.n_pins)
mlx5_init_pin_config(clock);
mlx5_init_pps(mdev);
clock->ptp = ptp_clock_register(&clock->ptp_info,
&mdev->pdev->dev);
......
@@ -127,6 +127,7 @@ enum {
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
......
@@ -9956,6 +9956,53 @@ struct mlx5_ifc_mirc_reg_bits {
u8 reserved_at_20[0x20];
};
struct mlx5_ifc_pddr_monitor_opcode_bits {
u8 reserved_at_0[0x10];
u8 monitor_opcode[0x10];
};
union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
u8 reserved_at_0[0x20];
};
enum {
/* Monitor opcodes */
MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
};
struct mlx5_ifc_pddr_troubleshooting_page_bits {
u8 reserved_at_0[0x10];
u8 group_opcode[0x10];
union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
u8 reserved_at_40[0x20];
u8 status_message[59][0x20];
};
union mlx5_ifc_pddr_reg_page_data_auto_bits {
struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
u8 reserved_at_0[0x7c0];
};
enum {
MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1,
};
struct mlx5_ifc_pddr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 pnat[0x2];
u8 reserved_at_12[0xe];
u8 reserved_at_20[0x18];
u8 page_select[0x8];
union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
};
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9970,6 +10017,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pamp_reg_bits pamp_reg;
struct mlx5_ifc_paos_reg_bits paos_reg;
struct mlx5_ifc_pcap_reg_bits pcap_reg;
struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
struct mlx5_ifc_pddr_reg_bits pddr_reg;
struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
......