Commit ff254dad authored by David S. Miller

Merge tag 'mlx5-updates-2021-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-19

This patchset provides some updates to mlx5e and mlx5 SW steering drivers:

1) Tariq and Vladyslav both provide trivial updates to the mlx5e netdev.

The next 12 patches in the patchset focus on mlx5 SW steering:
2) 3 trivial cleanup patches

3) Dynamic Flex parser support:
   Flex parser is a HW parser that can support protocols that are not
   natively supported by the HCA, such as Geneve (TLV options) and GTP-U.
   There are 8 such parsers, and each of them can be assigned to parse a
   specific set of protocols. (A small sketch of the parser-to-tag-offset
   mapping follows the commit header below.)

4) Enable matching on Geneve TLV options

5) Use Flex parser for MPLS over UDP/GRE

6) Enable matching on tunnel GTP-U and GTP-U first extension
   header using the dynamic flex parser

7) Improved QoS for SW steering internal QPair for a better insertion rate
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 316bcffe aeacb52a
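
Item 3 above maps onto a simple byte layout: four flex parsers share a 16-byte region of the STE tag, selected by parser id. A minimal user-space sketch of that mapping, mirroring the dr_ste_calc_flex_parser_offset() helper added in the dr_ste.h hunk below (the main() harness is illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Mirrors dr_ste_calc_flex_parser_offset(): each of the 4 parsers
 * sharing an STE tag owns 4 bytes, laid out from the end of the
 * 16-byte region backwards (parser 0 or 4 at bytes 12..15,
 * parser 3 or 7 at bytes 0..3).
 */
static uint8_t *calc_flex_parser_offset(uint8_t *tag, uint8_t parser_id)
{
        return tag + 4 * (3 - (parser_id % 4));
}

int main(void)
{
        uint8_t tag[16] = { 0 };
        uint8_t id;

        for (id = 0; id < 8; id++)
                printf("parser %u -> tag byte offset %ld\n",
                       id, (long)(calc_flex_parser_offset(tag, id) - tag));
        return 0;
}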
@@ -55,12 +55,17 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
 {
        struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
 
-       devlink_port_unregister(dl_port);
+       if (dl_port->registered)
+               devlink_port_unregister(dl_port);
 }
 
 struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       struct devlink_port *port;
 
-       return mlx5e_devlink_get_dl_port(priv);
+       port = mlx5e_devlink_get_dl_port(priv);
+       if (port->registered)
+               return port;
+       return NULL;
 }
@@ -6,6 +6,7 @@
 #include "en/port.h"
 #include "en_accel/en_accel.h"
 #include "accel/ipsec.h"
+#include "fpga/ipsec.h"
 
 static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
                            struct mlx5e_xsk_param *xsk)
@@ -89,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
        return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
 }
 
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
-                                         MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+                                  u8 log_stride_sz, u8 log_num_strides)
+{
+       if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+               return false;
+
+       if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
+           log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
+               return false;
+
+       if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
+               return false;
+
+       if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+               return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
+
+       return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+}
 
 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params,
                                  struct mlx5e_xsk_param *xsk)
 {
-       u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
-       s8 signed_log_num_strides_param;
-       u8 log_num_strides;
+       s8 log_num_strides;
+       u8 log_stride_sz;
 
        if (!mlx5e_rx_is_linear_skb(params, xsk))
                return false;
 
-       if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
-               return false;
-
-       if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
-               return true;
-
-       log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
-       signed_log_num_strides_param =
-               (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
-
-       return signed_log_num_strides_param >= 0;
+       log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+       log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+
+       return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
 }
 
 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
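
The new verification boils down to one invariant plus range checks: log_stride_sz + log_num_strides must equal MLX5_MPWRQ_LOG_WQE_SZ, with both values inside their HW-supported windows. A standalone sketch using the constants from the include/linux/mlx5/device.h hunk near the end of this diff, assuming the driver's MLX5_MPWRQ_LOG_WQE_SZ of 18:

#include <stdbool.h>
#include <stdio.h>

/* Constants from the device.h hunk below; MLX5_MPWRQ_LOG_WQE_SZ (18)
 * is assumed from the driver's en.h.
 */
#define MLX5_MPWRQ_LOG_WQE_SZ                   18
#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE     3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE         9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX          16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE           6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX            13

/* Same checks as mlx5e_verify_rx_mpwqe_strides(), with the
 * ext_stride_num_range capability passed in as a plain flag.
 */
static bool verify_rx_mpwqe_strides(bool ext_stride_num_range,
                                    unsigned int log_stride_sz,
                                    unsigned int log_num_strides)
{
        if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
                return false;
        if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
            log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
                return false;
        if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
                return false;
        return log_num_strides >= (ext_stride_num_range ?
                                   MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE :
                                   MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
}

int main(void)
{
        printf("%d\n", verify_rx_mpwqe_strides(false, 6, 12)); /* 1: 64B x 4K strides */
        printf("%d\n", verify_rx_mpwqe_strides(false, 13, 5)); /* 0: 5 < base 9 */
        printf("%d\n", verify_rx_mpwqe_strides(true, 13, 5));  /* 1: 5 >= ext base 3 */
        return 0;
}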
@@ -282,7 +292,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
                return false;
 
-       if (MLX5_IPSEC_DEV(mdev))
+       if (mlx5_fpga_is_ipsec_device(mdev))
                return false;
 
        if (params->xdp_prog) {
@@ -364,7 +374,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
        u32 buf_size = 0;
        int i;
 
-       if (MLX5_IPSEC_DEV(mdev))
+       if (mlx5_fpga_is_ipsec_device(mdev))
                byte_count += MLX5E_METADATA_ETHER_LEN;
 
        if (mlx5e_rx_is_linear_skb(params, xsk)) {
@@ -461,26 +471,36 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
        param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
 }
 
-void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
-                         struct mlx5e_params *params,
-                         struct mlx5e_xsk_param *xsk,
-                         u16 q_counter,
-                         struct mlx5e_rq_param *param)
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+                        struct mlx5e_params *params,
+                        struct mlx5e_xsk_param *xsk,
+                        u16 q_counter,
+                        struct mlx5e_rq_param *param)
 {
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int ndsegs = 1;
 
        switch (params->rq_wq_type) {
-       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
+               u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+               u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+
+               if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+                                                  log_wqe_num_of_strides)) {
+                       mlx5_core_err(mdev,
+                                     "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
+                                     log_wqe_stride_size, log_wqe_num_of_strides);
+                       return -EINVAL;
+               }
+
                MLX5_SET(wq, wq, log_wqe_num_of_strides,
-                        mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
-                        MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+                        log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
                MLX5_SET(wq, wq, log_wqe_stride_size,
-                        mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
-                        MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
+                        log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
                MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
                break;
+       }
        default: /* MLX5_WQ_TYPE_CYCLIC */
                MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
                mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
@@ -498,6 +518,8 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
        mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
+
+       return 0;
 }
 
 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
@@ -642,14 +664,17 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
        mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
 }
 
-void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
-                              struct mlx5e_params *params,
-                              u16 q_counter,
-                              struct mlx5e_channel_param *cparam)
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+                             struct mlx5e_params *params,
+                             u16 q_counter,
+                             struct mlx5e_channel_param *cparam)
 {
        u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
+       int err;
 
-       mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+       err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+       if (err)
+               return err;
 
        icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
        async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
@@ -658,4 +683,6 @@ void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
        mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
        mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
+
+       return 0;
 }
@@ -96,6 +96,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *para
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+                                  u8 log_stride_sz, u8 log_num_strides);
 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
                                 struct mlx5e_xsk_param *xsk);
 u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
@@ -122,11 +124,11 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
 /* Build queue parameters */
 void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
-void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
-                         struct mlx5e_params *params,
-                         struct mlx5e_xsk_param *xsk,
-                         u16 q_counter,
-                         struct mlx5e_rq_param *param);
+int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
+                        struct mlx5e_params *params,
+                        struct mlx5e_xsk_param *xsk,
+                        u16 q_counter,
+                        struct mlx5e_rq_param *param);
 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
                               u16 q_counter,
                               struct mlx5e_rq_param *param);
@@ -141,10 +143,10 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
 void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param);
-void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
-                              struct mlx5e_params *params,
-                              u16 q_counter,
-                              struct mlx5e_channel_param *cparam);
+int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
+                             struct mlx5e_params *params,
+                             u16 q_counter,
+                             struct mlx5e_channel_param *cparam);
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
......
@@ -2086,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
        if (!chs->c || !cparam)
                goto err_free;
 
-       mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+       err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+       if (err)
+               goto err_free;
 
        for (i = 0; i < chs->num; i++) {
                struct xsk_buff_pool *xsk_pool = NULL;
@@ -4886,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
                          struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct devlink_port *dl_port;
        int err;
 
        mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4901,14 +4905,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
        if (err)
                mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 
-       mlx5e_health_create_reporters(priv);
+       dl_port = mlx5e_devlink_get_dl_port(priv);
+       if (dl_port->registered)
+               mlx5e_health_create_reporters(priv);
 
        return 0;
 }
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
-       mlx5e_health_destroy_reporters(priv);
+       struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+
+       if (dl_port->registered)
+               mlx5e_health_destroy_reporters(priv);
+
        mlx5e_tls_cleanup(priv);
        mlx5e_ipsec_cleanup(priv);
 }
......
@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
        ft_attr.autogroup.max_num_groups = 1;
        tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(tt->termtbl)) {
-               esw_warn(dev, "Failed to create termination table\n");
+               esw_warn(dev, "Failed to create termination table (error %d)\n",
+                        IS_ERR(tt->termtbl));
                return -EOPNOTSUPP;
        }
 
        tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
                                       &tt->dest, 1);
        if (IS_ERR(tt->rule)) {
-               esw_warn(dev, "Failed to create termination table rule\n");
+               esw_warn(dev, "Failed to create termination table rule (error %d)\n",
+                        IS_ERR(tt->rule));
                goto add_flow_err;
        }
        return 0;
@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
        memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
 
        err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
-       if (err) {
-               esw_warn(esw->dev, "Failed to create termination table\n");
+       if (err)
                goto tt_create_err;
-       }
 
        hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
 tt_add_ref:
        tt->ref_count++;
@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
                tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
                                                     &dest[i], attr);
                if (IS_ERR(tt)) {
-                       esw_warn(esw->dev, "Failed to create termination table\n");
+                       esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
+                                IS_ERR(tt));
                        goto revert_changes;
                }
                attr->dests[num_vport_dests].termtbl = tt;
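
For reference, IS_ERR() only reports that a pointer encodes an error (it evaluates to 1 in the "%d" messages above); PTR_ERR() is what recovers the negative errno. A user-space sketch of the encoding, with simplified stand-ins for the kernel's err.h helpers:

#include <stdio.h>

/* Simplified stand-ins for the kernel's err.h helpers */
#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static long IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
        void *tbl = ERR_PTR(-12); /* e.g. -ENOMEM from a failed create */

        /* prints "IS_ERR: 1, PTR_ERR: -12", so the "(error %d)"
         * format above always reports 1 rather than the errno
         */
        printf("IS_ERR: %ld, PTR_ERR: %ld\n", IS_ERR(tbl), PTR_ERR(tbl));
        return 0;
}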
......
@@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
        return 0;
 }
 
+static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
+                                         u16 vport, bool *roce_en)
+{
+       u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
+       int err;
+
+       MLX5_SET(query_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+       MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+       MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
+
+       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       *roce_en = MLX5_GET(query_nic_vport_context_out, out,
+                           nic_vport_context.roce_en);
+       return 0;
+}
+
 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                            struct mlx5dr_cmd_caps *caps)
 {
+       bool roce_en;
+       int err;
+
        caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
        caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
        caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
        caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
        caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
 
+       if (MLX5_CAP_GEN(mdev, roce)) {
+               err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+               if (err)
+                       return err;
+
+               caps->roce_caps.roce_en = roce_en;
+               caps->roce_caps.fl_rc_qp_when_roce_disabled =
+                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+               caps->roce_caps.fl_rc_qp_when_roce_enabled =
+                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+       }
+
+       caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+
        if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
                caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
                caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                        MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
        }
 
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+               caps->flex_parser_id_geneve_tlv_option_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+               caps->flex_parser_id_mpls_over_gre =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
+
+       if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+               caps->flex_parser_id_mpls_over_udp =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
+               caps->flex_parser_id_gtpu_dw_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
+               caps->flex_parser_id_gtpu_teid =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
+               caps->flex_parser_id_gtpu_dw_2 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
+               caps->flex_parser_id_gtpu_first_ext_dw_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
+
        caps->nic_rx_drop_address =
                MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
        caps->nic_tx_drop_address =
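
The pattern here: a flex parser id capability is only meaningful when the corresponding *_ENABLED bit is set in flex_parser_protocols. A trivial sketch of that gating, using bit values from the mlx5_ifc.h hunk at the end of this diff:

#include <stdio.h>
#include <stdint.h>

/* Bit values from the mlx5_ifc.h hunk below */
#define MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED    (1 << 10)
#define MLX5_FLEX_PARSER_GTPU_ENABLED                   (1 << 11)
#define MLX5_FLEX_PARSER_GTPU_TEID_ENABLED              (1 << 19)

int main(void)
{
        /* example capability word: only the GTP-U parsers assigned */
        uint32_t flex_protocols = MLX5_FLEX_PARSER_GTPU_ENABLED |
                                  MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;

        if (flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
                printf("GTP-U TEID parser assigned, its id cap is valid\n");
        if (!(flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED))
                printf("Geneve TLV option 0 parser not assigned\n");
        return 0;
}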
......
@@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
                        return false;
                }
        }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc4);
+               e_idx = min(s_idx + sizeof(param->misc4), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn,
+                                  "Rule misc4 parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
        return true;
 }
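
The misc4 check enforces the same rule as the existing criteria: a rule value may only set bits that its matcher mask also sets. A generic sketch of that comparison (the driver's dr_rule_cmp_value_to_mask() walks the two buffers over [s_idx, e_idx); the helper below is illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

/* A value byte is acceptable only where the mask covers it,
 * i.e. value & ~mask must be zero over the checked range.
 */
static bool cmp_value_to_mask(const uint8_t *mask, const uint8_t *value,
                              size_t s_idx, size_t e_idx)
{
        size_t i;

        for (i = s_idx; i < e_idx; i++)
                if (value[i] & ~mask[i])
                        return false;
        return true;
}

int main(void)
{
        uint8_t mask[4]  = { 0xff, 0xf0, 0x00, 0x00 };
        uint8_t value[4] = { 0x12, 0x30, 0x00, 0x00 };

        printf("%d\n", cmp_value_to_mask(mask, value, 0, 4)); /* 1 */
        value[2] = 0x01; /* value bit outside the mask */
        printf("%d\n", cmp_value_to_mask(mask, value, 0, 4)); /* 0 */
        return 0;
}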
......
@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr {
        u8 min_rnr_timer;
        u8 sgid_index;
        u16 udp_src_port;
+       u8 fl:1;
 };
 
 struct dr_qp_rts_attr {
@@ -45,6 +46,7 @@ struct dr_qp_init_attr {
        u32 pdn;
        u32 max_send_wr;
        struct mlx5_uars_page *uar;
+       u8 isolate_vl_tc:1;
 };
 
 static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
@@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+       MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
        MLX5_SET(qpc, qpc, pd, attr->pdn);
        MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
        MLX5_SET(qpc, qpc, log_page_size,
@@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev,
 static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
 {
        dma_wmb();
-       *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
+       *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
 
        /* After wmb() the hw aware of new work */
        wmb();
@@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
 static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
                             u32 rkey, struct dr_data_seg *data_seg,
-                            u32 opcode, int nreq)
+                            u32 opcode, bool notify_hw)
 {
        struct mlx5_wqe_raddr_seg *wq_raddr;
        struct mlx5_wqe_ctrl_seg *wq_ctrl;
@@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
 
        dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
 
-       if (nreq)
+       if (notify_hw)
                dr_cmd_notify_hw(dr_qp, wq_ctrl);
 }
 
 static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
 {
        dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
-                        &send_info->write, MLX5_OPCODE_RDMA_WRITE, 0);
+                        &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
        dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
-                        &send_info->read, MLX5_OPCODE_RDMA_READ, 1);
+                        &send_info->read, MLX5_OPCODE_RDMA_READ, true);
 }
 
 /**
@@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
                 attr->udp_src_port);
 
        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+       MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
        MLX5_SET(qpc, qpc, min_rnr_nak, 1);
 
        MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
@@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
        return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
 }
 
+static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
+{
+       /* Check whether RC RoCE QP creation with force loopback is allowed.
+        * There are two separate capability bits for this:
+        *  - force loopback when RoCE is enabled
+        *  - force loopback when RoCE is disabled
+        */
+       return ((caps->roce_caps.roce_en &&
+                caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
+               (!caps->roce_caps.roce_en &&
+                caps->roce_caps.fl_rc_qp_when_roce_disabled));
+}
+
 static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
 {
        struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
@@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
        }
 
        /* RTR */
-       ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
-       if (ret)
-               return ret;
-
        rtr_attr.mtu = mtu;
        rtr_attr.qp_num = dr_qp->qpn;
        rtr_attr.min_rnr_timer = 12;
        rtr_attr.port_num = port;
-       rtr_attr.sgid_index = gid_index;
        rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
 
+       /* If QP creation with force loopback is allowed, then there
+        * is no need for GID index when creating the QP.
+        * Otherwise we query GID attributes and use GID index.
+        */
+       rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
+       if (!rtr_attr.fl) {
+               ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
+                                          &rtr_attr.dgid_attr);
+               if (ret)
+                       return ret;
+
+               rtr_attr.sgid_index = gid_index;
+       }
+
        ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
        if (ret) {
                mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
@@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
        init_attr.pdn = dmn->pdn;
        init_attr.uar = dmn->uar;
        init_attr.max_send_wr = QUEUE_SIZE;
+
+       /* Isolated VL is applicable only if force loopback is supported */
+       if (dr_send_allow_fl(&dmn->info.caps))
+               init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
        spin_lock_init(&dmn->send_ring->lock);
 
        dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
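
dr_send_allow_fl() above reduces to a two-row truth table over the new RoCE caps: force loopback is usable when the capability bit matching the current RoCE state is set. A user-space sketch of the same decision:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the three bits the series adds to struct mlx5dr_roce_cap */
struct roce_cap {
        bool roce_en;
        bool fl_rc_qp_when_roce_disabled;
        bool fl_rc_qp_when_roce_enabled;
};

/* Same logic as dr_send_allow_fl() */
static bool send_allow_fl(const struct roce_cap *caps)
{
        return (caps->roce_en && caps->fl_rc_qp_when_roce_enabled) ||
               (!caps->roce_en && caps->fl_rc_qp_when_roce_disabled);
}

int main(void)
{
        struct roce_cap on  = { .roce_en = true,  .fl_rc_qp_when_roce_enabled = true };
        struct roce_cap off = { .roce_en = false, .fl_rc_qp_when_roce_disabled = false };

        /* fl allowed -> skip the GID query and let isolate_vl_tc apply */
        printf("roce on:  %d\n", send_allow_fl(&on));  /* 1 */
        printf("roce off: %d\n", send_allow_fl(&off)); /* 0 */
        return 0;
}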
......
@@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
        spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
        spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
        spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+       spec->geneve_tlv_option_0_data =
+               MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
+       spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
+       spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
+       spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
+       spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
+       spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
+       spec->gtpu_first_ext_dw_0 =
+               MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
+}
+
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
+{
+       spec->prog_sample_field_id_0 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
+       spec->prog_sample_field_value_0 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
+       spec->prog_sample_field_id_1 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
+       spec->prog_sample_field_value_1 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
+       spec->prog_sample_field_id_2 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
+       spec->prog_sample_field_value_2 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
+       spec->prog_sample_field_id_3 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
+       spec->prog_sample_field_value_3 =
+               MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
 }
 
 void mlx5dr_ste_copy_param(u8 match_criteria,
@@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
                }
                dr_ste_copy_mask_misc3(buff, &set_param->misc3);
        }
+
+       param_location += sizeof(struct mlx5dr_match_misc3);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc4)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc4(buff, &set_param->misc4);
+       }
 }
 
 void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
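
The misc4 branch reuses the short-mask convention of the earlier criteria: when the caller's mask buffer ends inside this block, the remainder is copied into a zeroed scratch buffer so reads past the end see zeros. A sketch of that pattern (buffer sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define BLOCK_SZ 16 /* stand-in for sizeof(struct mlx5dr_match_misc4) */

/* Returns a pointer that is always safe to read BLOCK_SZ bytes from:
 * either directly into the caller's data, or a zero-padded copy when
 * the mask ends mid-block.
 */
static const unsigned char *mask_block(const unsigned char *data,
                                       size_t match_sz, size_t param_location,
                                       unsigned char *tail_param)
{
        if (match_sz < param_location + BLOCK_SZ) {
                memset(tail_param, 0, BLOCK_SZ);
                memcpy(tail_param, data + param_location,
                       match_sz - param_location);
                return tail_param;
        }
        return data + param_location;
}

int main(void)
{
        unsigned char data[20] = { [16] = 0xaa }, tail[BLOCK_SZ];
        /* mask is 20 bytes, block starts at 16: only 4 bytes are valid */
        const unsigned char *buff = mask_block(data, sizeof(data), 16, tail);

        printf("buff[0]=0x%02x buff[4]=0x%02x\n", buff[0], buff[4]); /* 0xaa 0x00 */
        return 0;
}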
@@ -1051,26 +1094,40 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
        ste_ctx->build_tnl_gre_init(sb, mask);
 }
 
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask,
-                              bool inner, bool rx)
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       sb->caps = caps;
+       return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx)
 {
        sb->rx = rx;
        sb->inner = inner;
-       ste_ctx->build_tnl_mpls_init(sb, mask);
+       sb->caps = caps;
+       return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
 }
 
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
-                         struct mlx5dr_ste_build *sb,
-                         struct mlx5dr_match_param *mask,
-                         struct mlx5dr_cmd_caps *caps,
-                         bool inner, bool rx)
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          struct mlx5dr_cmd_caps *caps,
+                          bool inner, bool rx)
 {
        sb->rx = rx;
        sb->inner = inner;
        sb->caps = caps;
-       return ste_ctx->build_icmp_init(sb, mask);
+       ste_ctx->build_icmp_init(sb, mask);
 }
 
 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
        ste_ctx->build_tnl_geneve_init(sb, mask);
 }
 
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+                                        struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_cmd_caps *caps,
+                                        bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask,
+                              bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
+}
+
 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
                                 struct mlx5dr_ste_build *sb,
                                 struct mlx5dr_match_param *mask,
@@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
        ste_ctx->build_src_gvmi_qpn_init(sb, mask);
 }
 
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_flex_parser_1_init(sb, mask);
+}
+
 static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
        [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
        [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
......
@@ -62,6 +62,13 @@
                               in_out##_first_mpls_ttl);                \
 } while (0)
 
+#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
+       u8 parser_id = (caps)->flex_parser_id_##fname; \
+       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
+       *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\
+       (spec)->fname = 0;\
+} while (0)
+
 #define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
        (_misc)->outer_first_mpls_over_gre_label || \
        (_misc)->outer_first_mpls_over_gre_exp || \
@@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 {
        DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
 };
 
+enum {
+       HDR_MPLS_OFFSET_LABEL = 12,
+       HDR_MPLS_OFFSET_EXP = 9,
+       HDR_MPLS_OFFSET_S_BOS = 8,
+       HDR_MPLS_OFFSET_TTL = 0,
+};
+
 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
 
+static inline u8 *
+dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
+{
+       /* Calculate tag byte offset based on flex parser id */
+       return tag + 4 * (3 - (parser_id % 4));
+}
+
 #define DR_STE_CTX_BUILDER(fname) \
        ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
                                 struct mlx5dr_match_param *mask))
@@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx {
        void DR_STE_CTX_BUILDER(mpls);
        void DR_STE_CTX_BUILDER(tnl_gre);
        void DR_STE_CTX_BUILDER(tnl_mpls);
-       int  DR_STE_CTX_BUILDER(icmp);
+       void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
+       void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
+       void DR_STE_CTX_BUILDER(icmp);
        void DR_STE_CTX_BUILDER(general_purpose);
        void DR_STE_CTX_BUILDER(eth_l4_misc);
        void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
        void DR_STE_CTX_BUILDER(tnl_geneve);
+       void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
        void DR_STE_CTX_BUILDER(register_0);
        void DR_STE_CTX_BUILDER(register_1);
        void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+       void DR_STE_CTX_BUILDER(flex_parser_0);
+       void DR_STE_CTX_BUILDER(flex_parser_1);
+       void DR_STE_CTX_BUILDER(tnl_gtpu);
+       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
+       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
 
        /* Getters and Setters */
        void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
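
The HDR_MPLS_OFFSET_* values added above are bit positions inside the 32-bit MPLS label-stack entry (20-bit label, 3-bit EXP, bottom-of-stack bit, 8-bit TTL). A sketch of packing and unpacking that word with them:

#include <stdio.h>
#include <stdint.h>

/* Bit offsets from the dr_ste.h hunk above */
enum {
        HDR_MPLS_OFFSET_LABEL = 12,
        HDR_MPLS_OFFSET_EXP   = 9,
        HDR_MPLS_OFFSET_S_BOS = 8,
        HDR_MPLS_OFFSET_TTL   = 0,
};

int main(void)
{
        uint32_t label = 16, exp = 5, bos = 1, ttl = 64;
        uint32_t hdr = (label << HDR_MPLS_OFFSET_LABEL) |
                       (exp << HDR_MPLS_OFFSET_EXP) |
                       (bos << HDR_MPLS_OFFSET_S_BOS) |
                       (ttl << HDR_MPLS_OFFSET_TTL);

        printf("hdr=0x%08x label=%u ttl=%u\n", hdr,
               hdr >> HDR_MPLS_OFFSET_LABEL, /* 20-bit label field */
               hdr & 0xff);                  /* TTL field */
        return 0;
}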
......
@@ -12,17 +12,30 @@
 #include "mlx5_ifc_dr.h"
 #include "mlx5dr.h"
 
-#define DR_RULE_MAX_STES 17
+#define DR_RULE_MAX_STES 18
 #define DR_ACTION_MAX_STES 5
 #define WIRE_PORT 0xFFFF
 #define DR_STE_SVLAN 0x1
 #define DR_STE_CVLAN 0x2
 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+#define DR_NUM_OF_FLEX_PARSERS 8
+#define DR_STE_MAX_FLEX_0_ID 3
+#define DR_STE_MAX_FLEX_1_ID 7
 
 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
 #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
 
+static inline bool dr_is_flex_parser_0_id(u8 parser_id)
+{
+       return parser_id <= DR_STE_MAX_FLEX_0_ID;
+}
+
+static inline bool dr_is_flex_parser_1_id(u8 parser_id)
+{
+       return parser_id > DR_STE_MAX_FLEX_0_ID;
+}
+
 enum mlx5dr_icm_chunk_size {
        DR_CHUNK_SIZE_1,
        DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
@@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria {
        DR_MATCHER_CRITERIA_INNER = 1 << 2,
        DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
        DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
-       DR_MATCHER_CRITERIA_MAX = 1 << 5,
+       DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
+       DR_MATCHER_CRITERIA_MAX = 1 << 6,
 };
 
 enum mlx5dr_action_type {
@@ -389,11 +403,21 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
                               struct mlx5dr_ste_build *sb,
                               struct mlx5dr_match_param *mask,
                               bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
-                         struct mlx5dr_ste_build *sb,
-                         struct mlx5dr_match_param *mask,
-                         struct mlx5dr_cmd_caps *caps,
-                         bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx);
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          struct mlx5dr_cmd_caps *caps,
+                          bool inner, bool rx);
 void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
                                    struct mlx5dr_ste_build *sb,
                                    struct mlx5dr_match_param *mask,
@@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
                                 struct mlx5dr_ste_build *sb,
                                 struct mlx5dr_match_param *mask,
                                 bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+                                        struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_cmd_caps *caps,
+                                        bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask,
+                              bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx);
 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
                                      struct mlx5dr_ste_build *sb,
                                      struct mlx5dr_match_param *mask,
@@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
                                   struct mlx5dr_match_param *mask,
                                   struct mlx5dr_domain *dmn,
                                   bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx);
 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
 
 /* Actions utils */
@@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 {
        u8 icmpv6_type;
        u8 icmpv4_code;
        u8 icmpv4_type;
-       u8 reserved_auto3[0x1c];
+       u32 geneve_tlv_option_0_data;
+       u8 gtpu_msg_flags;
+       u8 gtpu_msg_type;
+       u32 gtpu_teid;
+       u32 gtpu_dw_2;
+       u32 gtpu_first_ext_dw_0;
+       u32 gtpu_dw_0;
+};
+
+struct mlx5dr_match_misc4 {
+       u32 prog_sample_field_value_0;
+       u32 prog_sample_field_id_0;
+       u32 prog_sample_field_value_1;
+       u32 prog_sample_field_id_1;
+       u32 prog_sample_field_value_2;
+       u32 prog_sample_field_id_2;
+       u32 prog_sample_field_value_3;
+       u32 prog_sample_field_id_3;
 };
 
 struct mlx5dr_match_param {
@@ -655,6 +723,7 @@ struct mlx5dr_match_param {
        struct mlx5dr_match_spec inner;
        struct mlx5dr_match_misc2 misc2;
        struct mlx5dr_match_misc3 misc3;
+       struct mlx5dr_match_misc4 misc4;
 };
 
 #define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
@@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap {
        u32 num;
 };
 
+struct mlx5dr_roce_cap {
+       u8 roce_en:1;
+       u8 fl_rc_qp_when_roce_disabled:1;
+       u8 fl_rc_qp_when_roce_enabled:1;
+};
+
 struct mlx5dr_cmd_caps {
        u16 gvmi;
        u64 nic_rx_drop_address;
@@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps {
        u8 flex_parser_id_icmp_dw1;
        u8 flex_parser_id_icmpv6_dw0;
        u8 flex_parser_id_icmpv6_dw1;
+       u8 flex_parser_id_geneve_tlv_option_0;
+       u8 flex_parser_id_mpls_over_gre;
+       u8 flex_parser_id_mpls_over_udp;
+       u8 flex_parser_id_gtpu_dw_0;
+       u8 flex_parser_id_gtpu_teid;
+       u8 flex_parser_id_gtpu_dw_2;
+       u8 flex_parser_id_gtpu_first_ext_dw_0;
        u8 max_ft_level;
        u16 roce_min_src_udp;
        u8 num_esw_ports;
@@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps {
        struct mlx5dr_esw_caps esw_caps;
        struct mlx5dr_cmd_vport_cap *vports_caps;
        bool prio_tag_required;
+       struct mlx5dr_roce_cap roce_caps;
+       u8 isolate_vl_tc:1;
 };
 
 struct mlx5dr_domain_rx_tx {
@@ -1081,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr {
        u32 sq_wqe_cnt;
        u32 rq_wqe_cnt;
        u32 rq_wqe_shift;
+       u8 isolate_vl_tc:1;
 };
 
 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
......
@@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits {
 };
 
 struct mlx5_ifc_ste_flex_parser_0_bits {
-       u8         parser_3_label[0x14];
-       u8         parser_3_exp[0x3];
-       u8         parser_3_s_bos[0x1];
-       u8         parser_3_ttl[0x8];
+       u8         flex_parser_3[0x20];
 
        u8         flex_parser_2[0x20];
 
@@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
        u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
+       u8         reserved_at_0[0x5];
+       u8         gtpu_msg_flags[0x3];
+       u8         gtpu_msg_type[0x8];
+       u8         reserved_at_10[0x10];
+
+       u8         gtpu_teid[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_ste_general_purpose_bits {
        u8         general_purpose_lookup_field[0x20];
......
@@ -911,8 +911,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
        return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
 }
 
-#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
-#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE   (6)
+#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE     9
+#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX      16
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE       6
+#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX        13
 
 struct mpwrq_cqe_bc {
        __be16 filler_consumed_strides;
......
@@ -622,7 +622,19 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
 
        u8         geneve_tlv_option_0_data[0x20];
 
-       u8         reserved_at_140[0xc0];
+       u8         gtpu_teid[0x20];
+
+       u8         gtpu_msg_type[0x8];
+       u8         gtpu_msg_flags[0x8];
+       u8         reserved_at_170[0x10];
+
+       u8         gtpu_dw_2[0x20];
+
+       u8         gtpu_first_ext_dw_0[0x20];
+
+       u8         gtpu_dw_0[0x20];
+
+       u8         reserved_at_1e0[0x20];
 };
 
 struct mlx5_ifc_fte_match_set_misc4_bits {
@@ -949,7 +961,9 @@ struct mlx5_ifc_roce_cap_bits {
        u8         roce_apm[0x1];
        u8         reserved_at_1[0x3];
        u8         sw_r_roce_src_udp_port[0x1];
-       u8         reserved_at_5[0x19];
+       u8         fl_rc_qp_when_roce_disabled[0x1];
+       u8         fl_rc_qp_when_roce_enabled[0x1];
+       u8         reserved_at_7[0x17];
        u8         qp_ts_format[0x2];
 
        u8         reserved_at_20[0x60];
@@ -1237,9 +1251,17 @@ enum {
 
 enum {
        MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
+       MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
+       mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
        MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
        MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
        MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+       MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
+       MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
+       MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
+       MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
+       MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
+       MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
 };
 
 enum {
@@ -1297,7 +1319,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         log_max_srq_sz[0x8];
        u8         log_max_qp_sz[0x8];
        u8         event_cap[0x1];
-       u8         reserved_at_91[0x7];
+       u8         reserved_at_91[0x2];
+       u8         isolate_vl_tc_new[0x1];
+       u8         reserved_at_94[0x4];
        u8         prio_tag_required[0x1];
        u8         reserved_at_99[0x2];
        u8         log_max_qp[0x5];
@@ -1637,7 +1661,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         cqe_compression_timeout[0x10];
        u8         cqe_compression_max_num[0x10];
 
-       u8         reserved_at_5e0[0x10];
+       u8         reserved_at_5e0[0x8];
+       u8         flex_parser_id_gtpu_dw_0[0x4];
+       u8         reserved_at_5ec[0x4];
        u8         tag_matching[0x1];
        u8         rndv_offload_rc[0x1];
        u8         rndv_offload_dc[0x1];
@@ -1648,7 +1674,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         affiliate_nic_vport_criteria[0x8];
        u8         native_port_num[0x8];
        u8         num_vhca_ports[0x8];
-       u8         reserved_at_618[0x6];
+       u8         flex_parser_id_gtpu_teid[0x4];
+       u8         reserved_at_61c[0x2];
        u8         sw_owner_id[0x1];
        u8         reserved_at_61f[0x1];
 
@@ -1683,7 +1710,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_6e0[0x10];
        u8         sf_base_id[0x10];
 
-       u8         reserved_at_700[0x8];
+       u8         flex_parser_id_gtpu_dw_2[0x4];
+       u8         flex_parser_id_gtpu_first_ext_dw_0[0x4];
        u8         num_total_dynamic_vf_msix[0x18];
        u8         reserved_at_720[0x14];
        u8         dynamic_msix_table_size[0xc];
@@ -2918,7 +2946,8 @@ struct mlx5_ifc_qpc_bits {
        u8         state[0x4];
        u8         lag_tx_port_affinity[0x4];
        u8         st[0x8];
-       u8         reserved_at_10[0x3];
+       u8         reserved_at_10[0x2];
+       u8         isolate_vl_tc[0x1];
        u8         pm_state[0x2];
        u8         reserved_at_15[0x1];
        u8         req_e2e_credit_mode[0x2];
......