Commit 5a0d7dcf authored by David S. Miller

Merge tag 'mlx5-updates-2019-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-08-01

Misc updates for the mlx5 netdev driver:

1) Ingress rate support for E-Switch vports from Eli.
2) Gavi introduces flow counter bulk allocation and a counter pool,
   to improve the performance of flow counter acquisition
   (see the sketch after the commit header below).
3) From Tariq, micro improvements for the TX path.
4) From Shay, a small improvement for the XDP TX MPWQE inline flow.
5) Aya provides some cleanups for the TX devlink health reporters.
6) From Saeed, refactor checksum handling into a single function.
7) From Tonghao, allow dropping specific tunnel packets.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a9e21bea 6830b468
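Item 2 above introduces flow counter bulk allocation. As a minimal sketch of how the bulk-size encoding added in this series fits together, the snippet below uses only MLX5_FC_BULK_SIZE_FACTOR, enum mlx5_fc_bulk_alloc_bitmask and MLX5_FC_BULK_NUM_FCS() from the mlx5_ifc.h hunk near the end of this diff; the helper function itself is hypothetical and not part of the series:

```c
#include <linux/mlx5/mlx5_ifc.h>

/* Hypothetical illustration: each successive bit in the bulk-alloc bitmask
 * doubles the number of counters allocated by one ALLOC_FLOW_COUNTER
 * command, in units of MLX5_FC_BULK_SIZE_FACTOR (128), e.g.:
 *   MLX5_FC_BULK_128   (1 << 0) ->   128 counters
 *   MLX5_FC_BULK_1024  (1 << 3) ->  1024 counters
 *   MLX5_FC_BULK_16384 (1 << 7) -> 16384 counters
 */
static int fc_bulk_len_from_bitmask(enum mlx5_fc_bulk_alloc_bitmask mask)
{
	return MLX5_FC_BULK_NUM_FCS(mask); /* MLX5_FC_BULK_SIZE_FACTOR * mask */
}
```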
@@ -359,6 +359,7 @@ enum {
     MLX5E_SQ_STATE_IPSEC,
     MLX5E_SQ_STATE_AM,
     MLX5E_SQ_STATE_TLS,
+    MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
 };

 struct mlx5e_sq_wqe_info {
@@ -483,8 +484,6 @@ struct mlx5e_xdp_mpwqe {
     struct mlx5e_tx_wqe *wqe;
     u8 ds_count;
     u8 pkt_count;
-    u8 max_ds_count;
-    u8 complete;
     u8 inline_on;
 };
@@ -1134,7 +1133,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params);
 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
                             u16 num_channels);
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
...
@@ -4,7 +4,6 @@
 #ifndef __MLX5E_EN_REPORTER_H
 #define __MLX5E_EN_REPORTER_H

-#include <linux/mlx5/driver.h>
 #include "en.h"

 int mlx5e_tx_reporter_create(struct mlx5e_priv *priv);
...
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2019 Mellanox Technologies. */

+#include <net/devlink.h>
 #include "reporter.h"
 #include "lib/eq.h"
@@ -117,7 +116,7 @@ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
                                  char *err_str,
                                  struct mlx5e_tx_err_ctx *err_ctx)
 {
-    if (IS_ERR_OR_NULL(tx_reporter)) {
+    if (!tx_reporter) {
         netdev_err(err_ctx->sq->channel->netdev, err_str);
         return err_ctx->recover(err_ctx->sq);
     }
@@ -289,23 +288,27 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
 int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
 {
+    struct devlink_health_reporter *reporter;
     struct mlx5_core_dev *mdev = priv->mdev;
     struct devlink *devlink = priv_to_devlink(mdev);

-    priv->tx_reporter =
+    reporter =
         devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
                                        MLX5_REPORTER_TX_GRACEFUL_PERIOD,
                                        true, priv);
-    if (IS_ERR(priv->tx_reporter))
+    if (IS_ERR(reporter)) {
         netdev_warn(priv->netdev,
                     "Failed to create tx reporter, err = %ld\n",
-                    PTR_ERR(priv->tx_reporter));
-    return IS_ERR_OR_NULL(priv->tx_reporter);
+                    PTR_ERR(reporter));
+        return PTR_ERR(reporter);
+    }
+
+    priv->tx_reporter = reporter;
+    return 0;
 }

 void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
 {
-    if (IS_ERR_OR_NULL(priv->tx_reporter))
+    if (!priv->tx_reporter)
         return;

     devlink_health_reporter_destroy(priv->tx_reporter);
...
@@ -6,7 +6,7 @@

 #include "en.h"

-#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
 #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                             MLX5E_SQ_NOPS_ROOM)
@@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
     mlx5_write64((__be32 *)ctrl, uar_map);
 }

-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
 {
-    return !!wqe->ctrl.tisn;
+    return cseg && !!cseg->tisn;
+}
+
+static inline u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+                         struct sk_buff *skb)
+{
+    u8 mode;
+
+    if (mlx5e_transport_inline_tx_wqe(cseg))
+        return MLX5_INLINE_MODE_TCP_UDP;
+
+    mode = sq->min_inline_mode;
+
+    if (skb_vlan_tag_present(skb) &&
+        test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+        mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+    return mode;
 }

 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
...
@@ -179,33 +179,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
     struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
     struct mlx5e_xdpsq_stats *stats = sq->stats;
     struct mlx5_wq_cyc *wq = &sq->wq;
-    u8 wqebbs;
-    u16 pi;
-
-    mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
-
-    prefetchw(session->wqe->data);
-    session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
-    session->pkt_count = 0;
-    session->complete = 0;
+    u16 pi, contig_wqebbs;

     pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+    contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+    if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
+        mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);

-    wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
-                   MLX5E_XDP_MPW_MAX_WQEBBS);
+    session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);

-    session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+    prefetchw(session->wqe->data);
+    session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+    session->pkt_count = 0;

     mlx5e_xdp_update_inline_state(sq);
@@ -244,7 +230,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
 {
     if (unlikely(!sq->mpwqe.wqe)) {
         if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
-                                             MLX5_SEND_WQE_MAX_WQEBBS))) {
+                                             MLX5E_XDPSQ_STOP_ROOM))) {
             /* SQ is full, ring doorbell */
             mlx5e_xmit_xdp_doorbell(sq);
             sq->stats->full++;
@@ -285,8 +271,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
     mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

-    if (unlikely(session->complete ||
-                 session->ds_count == session->max_ds_count))
+    if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
+                 session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
         mlx5e_xdp_mpwqe_complete(sq);

     mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
...
@@ -40,6 +40,26 @@
     (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)

+#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
+    DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
+
+/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
+ * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_XDP_MPW_MAX_NUM_DS \
+    (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
@@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
         session->inline_on = 1;
 }

+static inline bool
+mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+{
+    return session->inline_on &&
+           session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+}
+
+static inline void
+mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
+                           u16 pi, u16 nnops)
+{
+    struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+
+    edge_wi = wi + nnops;
+
+    /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+    for (; wi < edge_wi; wi++) {
+        wi->num_wqebbs = 1;
+        wi->num_pkts = 0;
+        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+    }
+
+    sq->stats->nops += nnops;
+}
+
 static inline void
 mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
                          struct mlx5e_xdp_xmit_data *xdptxd,
@@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
     session->pkt_count++;

-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-
     if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
         struct mlx5_wqe_inline_seg *inline_dseg =
             (struct mlx5_wqe_inline_seg *)dseg;
         u16 ds_len = sizeof(*inline_dseg) + dma_len;
         u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);

-        if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
-            /* Not enough space for inline wqe, send with memory pointer */
-            session->complete = true;
-            goto no_inline;
-        }
-
         inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
         memcpy(inline_dseg->data, xdptxd->data, dma_len);

@@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
         return;
     }

-no_inline:
     dseg->addr = cpu_to_be64(xdptxd->dma_addr);
     dseg->byte_count = cpu_to_be32(dma_len);
     dseg->lkey = sq->mkey_be;
     session->ds_count++;
 }

-static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
-                                         struct mlx5e_tx_wqe **wqe)
+static inline struct mlx5e_tx_wqe *
+mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
 {
     struct mlx5_wq_cyc *wq = &sq->wq;
-    u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+    struct mlx5e_tx_wqe *wqe;

-    *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-    memset(*wqe, 0, sizeof(**wqe));
+    *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+    wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+    memset(wqe, 0, sizeof(*wqe));
+
+    return wqe;
 }

 static inline void
...
@@ -180,15 +180,3 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)

     return err;
 }
-
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
-{
-    u8 min_inline_mode;
-
-    mlx5_query_min_inline(mdev, &min_inline_mode);
-    if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
-        !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
-        min_inline_mode = MLX5_INLINE_MODE_L2;
-
-    return min_inline_mode;
-}
@@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
 static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
                                                   struct mlx5e_params *params)
 {
-    params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+    mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
     if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
         params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
         params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
...
@@ -1131,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
     sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
     sq->stop_room = MLX5E_SQ_STOP_ROOM;
     INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+    if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+        set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
     if (MLX5_IPSEC_DEV(c->priv->mdev))
         set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
     if (mlx5_accel_is_tls_device(c->priv->mdev)) {
@@ -2323,7 +2325,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
             goto err_close_channels;
     }

-    if (!IS_ERR_OR_NULL(priv->tx_reporter))
+    if (priv->tx_reporter)
         devlink_health_reporter_state_update(priv->tx_reporter,
                                              DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
@@ -4777,7 +4779,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
     mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

     /* TX inline */
-    params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
+    mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

     /* RSS */
     mlx5e_build_rss_params(rss_params, params->num_channels);
...
@@ -1156,6 +1156,23 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
     }
 }

+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+                                    struct tc_cls_matchall_offload *ma)
+{
+    switch (ma->command) {
+    case TC_CLSMATCHALL_REPLACE:
+        return mlx5e_tc_configure_matchall(priv, ma);
+    case TC_CLSMATCHALL_DESTROY:
+        return mlx5e_tc_delete_matchall(priv, ma);
+    case TC_CLSMATCHALL_STATS:
+        mlx5e_tc_stats_matchall(priv, ma);
+        return 0;
+    default:
+        return -EOPNOTSUPP;
+    }
+}
+
 static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
 {
@@ -1165,6 +1182,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
     switch (type) {
     case TC_SETUP_CLSFLOWER:
         return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+    case TC_SETUP_CLSMATCHALL:
+        return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
     default:
         return -EOPNOTSUPP;
     }
...
@@ -88,6 +88,7 @@ struct mlx5e_rep_priv {
     struct mlx5_flow_handle *vport_rx_rule;
     struct list_head vport_sqs_list;
     struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+    struct rtnl_link_stats64 prev_vf_vport_stats;
     struct devlink_port dl_port;
 };
...
@@ -859,13 +859,24 @@ tail_padding_csum(struct sk_buff *skb, int offset,
 }

 static void
-mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
-                       struct mlx5e_rq_stats *stats)
+mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
+                     struct mlx5e_rq_stats *stats)
 {
     struct ipv6hdr *ip6;
     struct iphdr *ip4;
     int pkt_len;

+    /* Fixup vlan headers, if any */
+    if (network_depth > ETH_HLEN)
+        /* CQE csum is calculated from the IP header and does
+         * not cover VLAN headers (if present). This will add
+         * the checksum manually.
+         */
+        skb->csum = csum_partial(skb->data + ETH_HLEN,
+                                 network_depth - ETH_HLEN,
+                                 skb->csum);
+
+    /* Fixup tail padding, if any */
     switch (proto) {
     case htons(ETH_P_IP):
         ip4 = (struct iphdr *)(skb->data + network_depth);
@@ -931,16 +942,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
         return; /* CQE csum covers all received bytes */

     /* csum might need some fixups ...*/
-    if (network_depth > ETH_HLEN)
-        /* CQE csum is calculated from the IP header and does
-         * not cover VLAN headers (if present). This will add
-         * the checksum manually.
-         */
-        skb->csum = csum_partial(skb->data + ETH_HLEN,
-                                 network_depth - ETH_HLEN,
-                                 skb->csum);
-
-    mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
+    mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
     return;
 }
...
@@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = {
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
+    { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = {
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
+    { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -200,6 +202,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
     s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
     s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
     s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+    s->rx_xdp_tx_nops += xdpsq_stats->nops;
     s->rx_xdp_tx_full += xdpsq_stats->full;
     s->rx_xdp_tx_err += xdpsq_stats->err;
     s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -227,6 +230,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
     s->tx_xdp_xmit += xdpsq_red_stats->xmit;
     s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
     s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+    s->tx_xdp_nops += xdpsq_red_stats->nops;
     s->tx_xdp_full += xdpsq_red_stats->full;
     s->tx_xdp_err += xdpsq_red_stats->err;
     s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -1331,6 +1335,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+    { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
     { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1340,6 +1345,7 @@ static const struct counter_desc xdpsq_stats_desc[] = {
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+    { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
     { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
...
@@ -81,6 +81,7 @@ struct mlx5e_sw_stats {
     u64 rx_xdp_tx_xmit;
     u64 rx_xdp_tx_mpwqe;
     u64 rx_xdp_tx_inlnw;
+    u64 rx_xdp_tx_nops;
     u64 rx_xdp_tx_full;
     u64 rx_xdp_tx_err;
     u64 rx_xdp_tx_cqe;
@@ -97,6 +98,7 @@ struct mlx5e_sw_stats {
     u64 tx_xdp_xmit;
     u64 tx_xdp_mpwqe;
     u64 tx_xdp_inlnw;
+    u64 tx_xdp_nops;
     u64 tx_xdp_full;
     u64 tx_xdp_err;
     u64 tx_xdp_cqes;
@@ -288,6 +290,7 @@ struct mlx5e_xdpsq_stats {
     u64 xmit;
     u64 mpwqe;
     u64 inlnw;
+    u64 nops;
     u64 full;
     u64 err;
     /* dirtied @completion */
...
@@ -2485,7 +2485,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
     if (flow_flag_test(flow, EGRESS) &&
         !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
-          (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
+          (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+          (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
         return false;

     if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -3638,6 +3639,106 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
     return err;
 }

+static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+                               struct netlink_ext_ack *extack)
+{
+    struct mlx5e_rep_priv *rpriv = priv->ppriv;
+    struct mlx5_eswitch *esw;
+    u16 vport_num;
+    u32 rate_mbps;
+    int err;
+
+    esw = priv->mdev->priv.eswitch;
+    /* rate is given in bytes/sec.
+     * First convert to bits/sec and then round to the nearest mbit/secs.
+     * mbit means million bits.
+     * Moreover, if rate is non zero we choose to configure to a minimum of
+     * 1 mbit/sec.
+     */
+    rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+    vport_num = rpriv->rep->vport;
+
+    err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
+    if (err)
+        NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
+
+    return err;
+}
+
+static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
+                                        struct flow_action *flow_action,
+                                        struct netlink_ext_ack *extack)
+{
+    struct mlx5e_rep_priv *rpriv = priv->ppriv;
+    const struct flow_action_entry *act;
+    int err;
+    int i;
+
+    if (!flow_action_has_entries(flow_action)) {
+        NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
+        return -EINVAL;
+    }
+
+    if (!flow_offload_has_one_action(flow_action)) {
+        NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
+        return -EOPNOTSUPP;
+    }
+
+    flow_action_for_each(i, act, flow_action) {
+        switch (act->id) {
+        case FLOW_ACTION_POLICE:
+            err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
+            if (err)
+                return err;
+
+            rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+            break;
+        default:
+            NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
+            return -EOPNOTSUPP;
+        }
+    }
+
+    return 0;
+}
+
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+                                struct tc_cls_matchall_offload *ma)
+{
+    struct netlink_ext_ack *extack = ma->common.extack;
+    int prio = TC_H_MAJ(ma->common.prio) >> 16;
+
+    if (prio != 1) {
+        NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
+        return -EINVAL;
+    }
+
+    return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
+}
+
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+                             struct tc_cls_matchall_offload *ma)
+{
+    struct netlink_ext_ack *extack = ma->common.extack;
+
+    return apply_police_params(priv, 0, extack);
+}
+
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+                             struct tc_cls_matchall_offload *ma)
+{
+    struct mlx5e_rep_priv *rpriv = priv->ppriv;
+    struct rtnl_link_stats64 cur_stats;
+    u64 dbytes;
+    u64 dpkts;
+
+    cur_stats = priv->stats.vf_vport;
+    dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+    dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+    rpriv->prev_vf_vport_stats = cur_stats;
+    flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+}
+
 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
                                               struct mlx5e_priv *peer_priv)
 {
...
@@ -63,6 +63,13 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
                        struct flow_cls_offload *f, unsigned long flags);

+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+                                struct tc_cls_matchall_offload *f);
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+                             struct tc_cls_matchall_offload *f);
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+                             struct tc_cls_matchall_offload *ma);
+
 struct mlx5e_encap_entry;
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                               struct mlx5e_encap_entry *e);
...
@@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
         num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
         stats->packets += skb_shinfo(skb)->gso_segs;
     } else {
-        u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
-            MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+        u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);

         opcode = MLX5_OPCODE_SEND;
         mss = 0;
@@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
         num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
         stats->packets += skb_shinfo(skb)->gso_segs;
     } else {
+        u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+
         opcode = MLX5_OPCODE_SEND;
         mss = 0;
-        ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+        ihs = mlx5e_calc_min_inline(mode, skb);
         num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
         stats->packets++;
     }
...
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
          */
         dma_rmb();

-        if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
-            atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
-        else
-            mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+        atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
         atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

         ++eq->cons_index;
@@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
     struct mlx5_eq_table *eqt = dev->priv.eq_table;

-    if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-        return -EINVAL;
-
     return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
     struct mlx5_eq_table *eqt = dev->priv.eq_table;

-    if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-        return -EINVAL;
-
     return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
@@ -102,6 +102,13 @@ struct mlx5_vport_info {
     bool trusted;
 };

+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+    MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+    MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+    MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
 struct mlx5_vport {
     struct mlx5_core_dev *dev;
     int vport;
@@ -123,7 +130,7 @@ struct mlx5_vport {
     } qos;

     bool enabled;
-    u16 enabled_events;
+    enum mlx5_eswitch_vport_event enabled_events;
 };

 enum offloads_fdb_flags {
@@ -208,8 +215,11 @@ enum {
 struct mlx5_eswitch {
     struct mlx5_core_dev *dev;
     struct mlx5_nb nb;
+    /* legacy data structures */
     struct mlx5_eswitch_fdb fdb_table;
     struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+    struct esw_mc_addr mc_promisc;
+    /* end of legacy */
     struct workqueue_struct *work_queue;
     struct mlx5_vport *vports;
     u32 flags;
@@ -219,7 +229,6 @@ struct mlx5_eswitch {
      * and async SRIOV admin state changes
      */
     struct mutex state_lock;
-    struct esw_mc_addr mc_promisc;

     struct {
         bool enabled;
@@ -234,8 +243,8 @@ struct mlx5_eswitch {
     struct mlx5_esw_functions esw_funcs;
 };

-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -252,6 +261,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport);
 void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                                struct mlx5_vport *vport);
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+                               u32 rate_mbps);

 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -514,6 +525,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
          (vport) = &(esw)->vports[i], \
          (i) < (esw)->total_vports; (i)++)

+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+    for ((i) = (esw)->total_vports - 1; \
+         (vport) = &(esw)->vports[i], \
+         (i) >= MLX5_VPORT_PF; (i)--)
+
 #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
     for ((i) = MLX5_VPORT_FIRST_VF; \
          (vport) = &(esw)->vports[(i)], \
@@ -575,6 +591,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+                                 enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
...
@@ -594,38 +594,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
     mlx5_del_flow_rules(rule);
 }

-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
 {
     u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
     u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
     u8 fdb_to_vport_reg_c_id;
     int err;

-    err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
-                                               out, sizeof(out));
-    if (err)
-        return err;
-
-    fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
-                                     esw_vport_context.fdb_to_vport_reg_c_id);
-
-    fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
-
-    MLX5_SET(modify_esw_vport_context_in, in,
-             esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
-    MLX5_SET(modify_esw_vport_context_in, in,
-             field_select.fdb_to_vport_reg_c_id, 1);
-
-    return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
-                                                 in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
-    u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
-    u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
-    u8 fdb_to_vport_reg_c_id;
-    int err;
+    if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+        return 0;

     err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
                                                out, sizeof(out));
@@ -635,7 +612,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
     fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
                                      esw_vport_context.fdb_to_vport_reg_c_id);

-    fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+    if (enable)
+        fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+    else
+        fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

     MLX5_SET(modify_esw_vport_context_in, in,
              esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -2131,7 +2111,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
     return NOTIFY_OK;
 }

-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
     int err;
@@ -2145,11 +2125,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
     if (err)
         return err;

-    if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-        err = mlx5_eswitch_enable_passing_vport_metadata(esw);
-        if (err)
-            goto err_vport_metadata;
-    }
+    err = esw_set_passing_vport_metadata(esw, true);
+    if (err)
+        goto err_vport_metadata;
+
+    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

     err = esw_offloads_load_all_reps(esw);
     if (err)
@@ -2163,8 +2143,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
     return 0;

 err_reps:
-    if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-        mlx5_eswitch_disable_passing_vport_metadata(esw);
+    mlx5_eswitch_disable_pf_vf_vports(esw);
+    esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
     esw_offloads_steering_cleanup(esw);
     return err;
@@ -2189,13 +2169,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
     return err;
 }

-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
     mlx5_rdma_disable_roce(esw->dev);
     esw_offloads_devcom_cleanup(esw);
     esw_offloads_unload_all_reps(esw);
-    if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-        mlx5_eswitch_disable_passing_vport_metadata(esw);
+    mlx5_eswitch_disable_pf_vf_vports(esw);
+    esw_set_passing_vport_metadata(esw, false);
     esw_offloads_steering_cleanup(esw);
     esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
...
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
     return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }

-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+                           enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+                           u32 *id)
 {
     u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
     u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)

     MLX5_SET(alloc_flow_counter_in, in, opcode,
              MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+    MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

     err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
     if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
     return err;
 }

+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+    return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
 {
     u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
     return 0;
 }

-struct mlx5_cmd_fc_bulk {
-    u32 id;
-    int num;
-    int outlen;
-    u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
-    struct mlx5_cmd_fc_bulk *b;
-    int outlen =
-        MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-        MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
-    b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
-    if (!b)
-        return NULL;
-
-    b->id = id;
-    b->num = num;
-    b->outlen = outlen;
-
-    return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
 {
-    kfree(b);
+    return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+        MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
 }

-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+                           u32 *out)
 {
+    int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
     u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

     MLX5_SET(query_flow_counter_in, in, opcode,
              MLX5_CMD_OP_QUERY_FLOW_COUNTER);
     MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-    MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
-    MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-    return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-                          struct mlx5_cmd_fc_bulk *b, u32 id,
-                          u64 *packets, u64 *bytes)
-{
-    int index = id - b->id;
-    void *stats;
-
-    if (index < 0 || index >= b->num) {
-        mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
-                       id, b->id, b->id + b->num - 1);
-        return;
-    }
-
-    stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
-                         flow_statistics[index]);
-    *packets = MLX5_GET64(traffic_counter, stats, packets);
-    *bytes = MLX5_GET64(traffic_counter, stats, octets);
+    MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+    MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+    return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }

 int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
...
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
 };

 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+                           enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+                           u32 *id);
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
                       u64 *packets, u64 *bytes);

-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-                          struct mlx5_cmd_fc_bulk *b, u32 id,
-                          u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+                           u32 *out);

 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
...
@@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
     int err = 0;

-    if (cleanup)
+    if (cleanup) {
+        mlx5_unregister_device(dev);
         mlx5_drain_health_wq(dev);
+    }

     mutex_lock(&dev->intf_state_mutex);
     if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev)
     mlx5_crdump_disable(dev);
     mlx5_devlink_unregister(devlink);
-    mlx5_unregister_device(dev);

     if (mlx5_unload_one(dev, true)) {
         mlx5_core_err(dev, "mlx5_unload_one failed\n");
...
@@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
                            u8 *min_inline_mode)
 {
     switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+    case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+        if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
+            break;
+        /* fall through */
     case MLX5_CAP_INLINE_MODE_L2:
         *min_inline_mode = MLX5_INLINE_MODE_L2;
         break;
-    case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
-        mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
-        break;
     case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
         *min_inline_mode = MLX5_INLINE_MODE_NONE;
         break;
...
@@ -477,6 +477,17 @@ struct mlx5_core_sriov {
     u16 max_vfs;
 };

+struct mlx5_fc_pool {
+    struct mlx5_core_dev *dev;
+    struct mutex pool_lock; /* protects pool lists */
+    struct list_head fully_used;
+    struct list_head partially_used;
+    struct list_head unused;
+    int available_fcs;
+    int used_fcs;
+    int threshold;
+};
+
 struct mlx5_fc_stats {
     spinlock_t counters_idr_lock; /* protects counters_idr */
     struct idr counters_idr;
@@ -488,6 +499,8 @@ struct mlx5_fc_stats {
     struct delayed_work work;
     unsigned long next_query;
     unsigned long sampling_interval; /* jiffies */
+    u32 *bulk_query_out;
+    struct mlx5_fc_pool fc_pool;
 };

 struct mlx5_events;
...
@@ -1040,6 +1040,21 @@ enum {
     MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
 };

+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+    MLX5_FC_BULK_128   = (1 << 0),
+    MLX5_FC_BULK_256   = (1 << 1),
+    MLX5_FC_BULK_512   = (1 << 2),
+    MLX5_FC_BULK_1024  = (1 << 3),
+    MLX5_FC_BULK_2048  = (1 << 4),
+    MLX5_FC_BULK_4096  = (1 << 5),
+    MLX5_FC_BULK_8192  = (1 << 6),
+    MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
 struct mlx5_ifc_cmd_hca_cap_bits {
     u8 reserved_at_0[0x30];
     u8 vhca_id[0x10];
@@ -1244,7 +1259,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
     u8 reserved_at_2e0[0x7];
     u8 max_qp_mcg[0x19];

-    u8 reserved_at_300[0x18];
+    u8 reserved_at_300[0x10];
+    u8 flow_counter_bulk_alloc[0x8];
     u8 log_max_mcg[0x8];

     u8 reserved_at_320[0x3];
@@ -2766,7 +2782,7 @@ struct mlx5_ifc_traffic_counter_bits {
 struct mlx5_ifc_tisc_bits {
     u8 strict_lag_tx_port_affinity[0x1];
     u8 tls_en[0x1];
-    u8 reserved_at_1[0x2];
+    u8 reserved_at_2[0x2];
     u8 lag_tx_port_affinity[0x04];

     u8 reserved_at_8[0x4];
@@ -2941,6 +2957,13 @@ enum {
     SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
 };

+enum {
+    ELEMENT_TYPE_CAP_MASK_TASR          = 1 << 0,
+    ELEMENT_TYPE_CAP_MASK_VPORT         = 1 << 1,
+    ELEMENT_TYPE_CAP_MASK_VPORT_TC      = 1 << 2,
+    ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+};
+
 struct mlx5_ifc_scheduling_context_bits {
     u8 element_type[0x8];
     u8 reserved_at_8[0x18];
@@ -7815,7 +7838,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
     u8 reserved_at_20[0x10];
     u8 op_mod[0x10];

-    u8 reserved_at_40[0x40];
+    u8 reserved_at_40[0x38];
+    u8 flow_counter_bulk[0x8];
 };

 struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
...