Commit 47f058ce authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-07-17

1) Add resiliency for lost completions for PTP TX port timestamp

2) Report Header-data split state via ethtool

3) Decouple HTB code from main regular TX code

* tag 'mlx5-updates-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: CT: Remove warning of ignore_flow_level support for non PF
  net/mlx5e: Add resiliency for PTP TX port timestamp
  net/mlx5: Expose ts_cqe_metadata_size2wqe_counter
  net/mlx5e: HTB, move htb functions to a new file
  net/mlx5e: HTB, change functions name to follow convention
  net/mlx5e: HTB, remove priv from htb function calls
  net/mlx5e: HTB, hide and dynamically allocate mlx5e_htb structure
  net/mlx5e: HTB, move stats and max_sqs to priv
  net/mlx5e: HTB, move section comment to the right place
  net/mlx5e: HTB, move ids to selq_params struct
  net/mlx5e: HTB, reduce visibility of htb functions
  net/mlx5e: Fix mqprio_rl handling on devlink reload
  net/mlx5e: Report header-data split state through ethtool
====================

Link: https://lore.kernel.org/r/20220719203529.51151-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 5fb859f7 22df2e93
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
 		en_selftest.o en/port.o en/monitor_stats.o en/health.o \
 		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
 		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
-		en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
+		en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+		lib/crypto.o

 #
 # Netdev extra
......
@@ -321,7 +321,8 @@ struct mlx5e_params {
 		u8 num_tc;
 		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 		struct {
-			struct mlx5e_mqprio_rl *rl;
+			u64 max_rate[TC_MAX_QUEUE];
+			u32 hw_id[TC_MAX_QUEUE];
 		} channel;
 	} mqprio;
 	bool rx_cqe_compress_def;
@@ -898,16 +899,8 @@ struct mlx5e_scratchpad {
 	cpumask_var_t cpumask;
 };

-struct mlx5e_htb {
-	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
-	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
-	struct mlx5e_sq_stats **qos_sq_stats;
-	u16 max_qos_sqs;
-	u16 maj_id;
-	u16 defcls;
-};
-
 struct mlx5e_trap;
+struct mlx5e_htb;

 struct mlx5e_priv {
 	/* priv data path fields - start */
@@ -945,6 +938,8 @@ struct mlx5e_priv {
 	struct mlx5e_channel_stats **channel_stats;
 	struct mlx5e_channel_stats trap_stats;
 	struct mlx5e_ptp_stats ptp_stats;
+	struct mlx5e_sq_stats **htb_qos_sq_stats;
+	u16 htb_max_qos_sqs;
 	u16 stats_nch;
 	u16 max_nch;
 	u8 max_opened_tc;
@@ -976,7 +971,7 @@ struct mlx5e_priv {
 	struct mlx5e_hv_vhca_stats_agent stats_agent;
 #endif
 	struct mlx5e_scratchpad scratchpad;
-	struct mlx5e_htb htb;
+	struct mlx5e_htb *htb;
 	struct mlx5e_mqprio_rl *mqprio_rl;
 };
@@ -1181,7 +1176,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 				     struct ethtool_stats *stats, u64 *data);
 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
-				 struct ethtool_ringparam *param);
+				 struct ethtool_ringparam *param,
+				 struct kernel_ethtool_ringparam *kernel_param);
 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 				struct ethtool_ringparam *param);
 void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
......
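A note on the mlx5e_params change above: keeping raw max_rate[] and hw_id[] arrays instead of a struct mlx5e_mqprio_rl pointer leaves mlx5e_params free of object state, which is what the shortlog's "Fix mqprio_rl handling on devlink reload" patch needs: after a reload, the rate-limit object can be rebuilt from the persisted values. A rough sketch of the idea (the helper name and its body are hypothetical, not from this diff):

/* Hypothetical sketch: rebuild the mqprio rate-limit object from the
 * plain values persisted in mlx5e_params, e.g. after a devlink reload
 * has destroyed the previous object.
 */
static int mlx5e_mqprio_rl_rebuild(struct mlx5e_priv *priv)
{
    struct mlx5e_params *params = &priv->channels.params;
    int tc;

    for (tc = 0; tc < params->mqprio.num_tc; tc++) {
        u64 max_rate = params->mqprio.channel.max_rate[tc];

        if (!max_rate)
            continue;
        /* program a rate limiter for this TC and record its
         * hw_id in params->mqprio.channel.hw_id[tc]
         */
    }
    return 0;
}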
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5E_EN_HTB_H_
#define __MLX5E_EN_HTB_H_

#include "qos.h"

#define MLX5E_QOS_MAX_LEAF_NODES 256

struct mlx5e_selq;
struct mlx5e_htb;

typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);

int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);

/* TX datapath API */
int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);

/* HTB TC handlers */
int
mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
			   u32 parent_classid, u64 rate, u64 ceil,
			   struct netlink_ext_ack *extack);
int
mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
			u64 rate, u64 ceil, struct netlink_ext_ack *extack);
int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
		       struct netlink_ext_ack *extack);
int
mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
			struct netlink_ext_ack *extack);
int
mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
		      struct netlink_ext_ack *extack);

struct mlx5e_htb *mlx5e_htb_alloc(void);
void mlx5e_htb_free(struct mlx5e_htb *htb);
int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
		   struct net_device *netdev, struct mlx5_core_dev *mdev,
		   struct mlx5e_selq *selq, struct mlx5e_priv *priv);
void mlx5e_htb_cleanup(struct mlx5e_htb *htb);

#endif
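The lifecycle implied by these declarations is alloc, then init, then the TC handlers, then cleanup and free. A minimal caller sketch, built only from the declarations above (not from the collapsed en/htb.c); error handling is abbreviated, and htb_qopt, netdev, mdev, selq, and priv are assumed to be in scope:

/* Hedged sketch of a caller driving the new, opaque mlx5e_htb API. */
struct mlx5e_htb *htb;
int err;

htb = mlx5e_htb_alloc();
if (!htb)
    return -ENOMEM;

err = mlx5e_htb_init(htb, htb_qopt, netdev, mdev, selq, priv);
if (err) {
    mlx5e_htb_free(htb);
    return err;
}

/* ... mlx5e_htb_leaf_alloc_queue()/mlx5e_htb_leaf_del()/... as TC
 * commands arrive; mlx5e_htb_cur_leaf_nodes(htb) for accounting ...
 */

mlx5e_htb_cleanup(htb);
mlx5e_htb_free(htb);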
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
 	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 }

+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	struct skb_shared_hwtstamps hwts = {};
+	struct sk_buff *skb;
+
+	ptpsq->cq_stats->resync_event++;
+
+	while (skb_cc != skb_id) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+		skb_tstamp_tx(skb, &hwts);
+		ptpsq->cq_stats->resync_cqe++;
+		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+	}
+}
+
 static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 				    struct mlx5_cqe64 *cqe,
 				    int budget)
 {
-	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
 	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+	struct sk_buff *skb;
 	ktime_t hwtstamp;

 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 		ptpsq->cq_stats->err_cqe++;
 		goto out;
 	}

+	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
 	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
 				      hwtstamp, ptpsq->cq_stats);
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+	struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;

 	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
 					     GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 	ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
 	ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
 	ptpsq->skb_fifo.mask = wq_sz - 1;
+	if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+		ptpsq->ts_cqe_ctr_mask =
+			(1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;

 	return 0;
 }
......
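To make the resync math concrete: PTP_WQE_CTR2IDX() reduces both the SKB FIFO counters and the CQE's wqe_counter modulo the mask window. A worked example with an assumed capability value (the value 7 is an assumption, not taken from this diff):

/* Assumed: ts_cqe_metadata_size2wqe_counter == 7, so
 * ts_cqe_ctr_mask == (1 << 7) - 1 == 0x7f (a 128-entry window).
 * If skb_fifo_cc == 1000, PTP_WQE_CTR2IDX(1000) == 1000 & 0x7f == 104.
 * A CQE whose wqe_counter also decodes to 104 matches; anything else
 * means intervening timestamp CQEs were lost, and
 * mlx5e_ptp_skb_fifo_ts_cqe_resync() pops the skipped SKBs, delivering
 * the timestamp already saved in each skb's cb, until the indices
 * realign.
 */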
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
 	u16 skb_fifo_pc;
 	struct mlx5e_skb_fifo skb_fifo;
 	struct mlx5e_ptp_cq_stats *cq_stats;
+	u16 ts_cqe_ctr_mask;
 };

 enum {
......
@@ -6,40 +6,39 @@

 #include <linux/mlx5/driver.h>

-#define MLX5E_QOS_MAX_LEAF_NODES 256
+#define BYTES_IN_MBIT 125000

 struct mlx5e_priv;
+struct mlx5e_htb;
 struct mlx5e_channels;
 struct mlx5e_channel;
+struct tc_htb_qopt_offload;

 int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
 int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);

 /* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+		      u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
 int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
 void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
 void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
 void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);

 /* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
-		       struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
-			       u32 parent_classid, u64 rate, u64 ceil,
-			       struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
-			    u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
-		       struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
-			    struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
-			  struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);

 /* MQPRIO TX rate limit */
 struct mlx5e_mqprio_rl;
......
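With the per-command HTB exports gone, TC offload now enters through the single mlx5e_htb_setup_tc(). Its body is not part of this diff; a plausible shape of the dispatch, using the upstream tc_htb_qopt_offload command enum from include/net/pkt_cls.h, might look roughly like the following (a sketch only, not the actual mlx5 implementation):

/* Illustrative sketch of a setup_tc HTB dispatcher. */
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
    struct mlx5e_htb *htb = priv->htb;

    if (!htb && htb_qopt->command != TC_HTB_CREATE)
        return -EINVAL;

    switch (htb_qopt->command) {
    case TC_HTB_CREATE:
        /* allocate and set up priv->htb via mlx5e_htb_alloc()/mlx5e_htb_init() */
        return 0;
    case TC_HTB_DESTROY:
        /* mlx5e_htb_cleanup() + mlx5e_htb_free(), then clear priv->htb */
        return 0;
    case TC_HTB_LEAF_ALLOC_QUEUE:
        return mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid,
                                          htb_qopt->parent_classid,
                                          htb_qopt->rate, htb_qopt->ceil,
                                          htb_qopt->extack);
    case TC_HTB_NODE_MODIFY:
        return mlx5e_htb_node_modify(htb, htb_qopt->classid,
                                     htb_qopt->rate, htb_qopt->ceil,
                                     htb_qopt->extack);
    /* ... TC_HTB_LEAF_TO_INNER, TC_HTB_LEAF_DEL, TC_HTB_LEAF_DEL_LAST ... */
    default:
        return -EOPNOTSUPP;
    }
}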
@@ -7,6 +7,7 @@
 #include <linux/rcupdate.h>
 #include "en.h"
 #include "en/ptp.h"
+#include "en/htb.h"

 struct mlx5e_selq_params {
 	unsigned int num_regular_queues;
@@ -19,6 +20,8 @@ struct mlx5e_selq_params {
 			bool is_ptp : 1;
 		};
 	};
+	u16 htb_maj_id;
+	u16 htb_defcls;
 };

 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +47,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
 		.num_tcs = 1,
 		.is_htb = false,
 		.is_ptp = false,
+		.htb_maj_id = 0,
+		.htb_defcls = 0,
 	};

 	rcu_assign_pointer(selq->active, init_params);
@@ -64,21 +69,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
 	selq->standby = NULL;
 }

-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
 {
+	struct mlx5e_selq_params *selq_active;
+
 	lockdep_assert_held(selq->state_lock);
 	WARN_ON_ONCE(selq->is_prepared);

 	selq->is_prepared = true;

+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
 	selq->standby->num_channels = params->num_channels;
 	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
 	selq->standby->num_regular_queues =
 		selq->standby->num_channels * selq->standby->num_tcs;
-	selq->standby->is_htb = htb;
 	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
 }

+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+	struct mlx5e_selq_params *selq_active =
+		rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+	return selq_active->htb_maj_id;
+}
+
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+	struct mlx5e_selq_params *selq_active;
+
+	lockdep_assert_held(selq->state_lock);
+	WARN_ON_ONCE(selq->is_prepared);
+
+	selq->is_prepared = true;
+
+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
+	selq->standby->is_htb = htb_maj_id;
+	selq->standby->htb_maj_id = htb_maj_id;
+	selq->standby->htb_defcls = htb_defcls;
+}
+
 void mlx5e_selq_apply(struct mlx5e_selq *selq)
 {
 	struct mlx5e_selq_params *old_params;
@@ -137,20 +171,21 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
 	return selq->num_regular_queues + up;
 }

-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+				  struct mlx5e_selq_params *selq)
 {
 	u16 classid;

 	/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
-	if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+	if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
 		classid = TC_H_MIN(skb->priority);
 	else
-		classid = READ_ONCE(priv->htb.defcls);
+		classid = selq->htb_defcls;

 	if (!classid)
 		return 0;

-	return mlx5e_get_txq_by_classid(priv, classid);
+	return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
 }

 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -187,10 +222,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 			       up * selq->num_channels;
 	}

-	if (unlikely(selq->is_htb)) {
+	if (unlikely(selq->htb_maj_id)) {
 		/* num_tcs == 1, shortcut for PTP */

-		txq_ix = mlx5e_select_htb_queue(priv, skb);
+		txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
 		if (txq_ix > 0)
 			return txq_ix;
......
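Design note on the selq changes: htb_maj_id and htb_defcls now live inside the RCU-managed mlx5e_selq_params, so the TX fast path reads both values, along with the queue counts, from the single snapshot it already dereferences; the old smp_load_acquire()/READ_ONCE() pair on priv->htb fields is gone. Conceptually, the reader side becomes (a simplified model of what mlx5e_select_queue() does, not a verbatim excerpt):

/* Simplified model of the fast-path read; assumes the same RCU scheme
 * the diff shows for selq->active.
 */
struct mlx5e_selq_params *selq;
u16 txq = 0;

rcu_read_lock();
selq = rcu_dereference(priv->selq.active);
if (unlikely(selq->htb_maj_id))       /* consistent with htb_defcls */
    txq = mlx5e_select_htb_queue(priv, skb, selq);
rcu_read_unlock();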
@@ -21,7 +21,9 @@ struct sk_buff;

 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
 void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
 void mlx5e_selq_apply(struct mlx5e_selq *selq);
 void mlx5e_selq_cancel(struct mlx5e_selq *selq);
......
@@ -36,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	int err;

 	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
-		if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
 			mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
 		err = -EOPNOTSUPP;
 		goto err_check;
......
@@ -2062,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
 	/* Ignore_flow_level support isn't supported by default for VFs and so post_act
 	 * won't be supported. Skip showing error msg.
 	 */
-	if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+	if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
 		err_msg = "post action is missing";
 	err = -EOPNOTSUPP;
 	goto out_err;
......
@@ -30,6 +30,8 @@
  * SOFTWARE.
  */

+#include <linux/ethtool_netlink.h>
+
 #include "en.h"
 #include "en/port.h"
 #include "en/params.h"
@@ -305,12 +307,18 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 }

 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
-				 struct ethtool_ringparam *param)
+				 struct ethtool_ringparam *param,
+				 struct kernel_ethtool_ringparam *kernel_param)
 {
 	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
 	param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
 	param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+	kernel_param->tcp_data_split =
+		(priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+		ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+		ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 }

 static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +328,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
 }

 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	 * because the numeration of the QoS SQs will change, while per-queue
 	 * qdiscs are attached.
 	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		err = -EINVAL;
 		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
 			   __func__);
@@ -2067,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	 * the numeration of the QoS SQs will change, while per-queue qdiscs are
 	 * attached.
 	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
 		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
 			   __func__);
 		return -EINVAL;
......
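Usage note: with tcp_data_split filled in, the state is visible from userspace over the ethtool netlink API. Recent ethtool binaries print it together with the ring parameters (a "TCP data split" line in `ethtool -g <dev>` output; the exact wording depends on the ethtool version), reporting it as enabled only while the device is in MLX5E_PACKET_MERGE_SHAMPO mode.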
@@ -229,7 +229,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
 }

 static int
......
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
 	int i;

 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);

 	for (i = 0; i < max_qos_sqs; i++) {
 		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
 };

 static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2184,13 +2186,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
 {
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
 }

 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
 {
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
 	int i, qid;

 	for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2210,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
 	int i, qid;

 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);

 	for (qid = 0; qid < max_qos_sqs; qid++) {
 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
......
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
 	u64 err_cqe;
 	u64 abort;
 	u64 abort_abs_diff_ns;
+	u64 resync_cqe;
+	u64 resync_event;
 };

 struct mlx5e_stats {
......
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 	mlx5e_tx_mpwqe_session_complete(sq);
 }

+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+				 struct mlx5_wqe_eth_seg *eseg)
+{
+	if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+		eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+							ptpsq->ts_cqe_ctr_mask);
+}
+
 static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
 				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
 				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
 	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+	if (unlikely(sq->ptpsq))
+		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
 }

 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
......
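This is the producer-side counterpart of the CQE matching in en/ptp.c above. A worked example, again with an assumed mask value:

/* Assumed ts_cqe_ctr_mask == 0x7f. For a PTP skb sent when
 * skb_fifo_pc == 300, mlx5e_cqe_ts_id_eseg() stores
 * cpu_to_be32(300 & 0x7f) == cpu_to_be32(44) in the WQE's
 * flow_table_metadata; the device echoes the counter back in the
 * timestamp CQE, where PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter))
 * must again yield 44 for the completion to line up with the SKB FIFO.
 */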
@@ -83,7 +83,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
 }

 static int mlx5i_set_channels(struct net_device *dev,
......
@@ -1833,7 +1833,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8 sw_vhca_id[0xe];
 	u8 reserved_at_230[0x10];

-	u8 reserved_at_240[0x5c0];
+	u8 reserved_at_240[0xb];
+	u8 ts_cqe_metadata_size2wqe_counter[0x5];
+	u8 reserved_at_250[0x10];
+
+	u8 reserved_at_260[0x5a0];
 };

 enum mlx5_ifc_flow_destination_type {
......
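The new capability field is consumed through the existing MLX5_CAP_GEN_2() accessor, as en/ptp.c does above; schematically:

/* Mask derivation from the capability (mirrors en/ptp.c above); a zero
 * capability leaves the mask 0, which disables the resiliency checks
 * in mlx5e_ptp_ts_cqe_drop().
 */
u8 log_bits = MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter);
u16 ts_cqe_ctr_mask = log_bits ? (1 << log_bits) - 1 : 0;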