Commit 1a2b60f6 authored by Jakub Kicinski

Merge tag 'mlx5-dr-2021-01-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-dr-2021-01-29

Add support for ConnectX-6 Dx software steering

This series adds SW steering support for ConnectX-6 Dx.

Since the STE and action formats are different on this new HW, we
implemented a HW-specific STEv1 layer on top of the infrastructure
introduced in the previous mlx5 DR patchset, providing the same
functionality as on previous devices.

Most of the code in this series is very low-level and HW-specific; it
implements the function pointers for the generic SW steering layer.

* tag 'mlx5-dr-2021-01-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: DR, Allow SW steering for sw_owner_v2 devices
  net/mlx5: DR, Copy all 64B whenever replacing STE in the head of miss-list
  net/mlx5: DR, Use HW specific logic API when writing STE
  net/mlx5: DR, Use the right size when writing partial STE into HW
  net/mlx5: DR, Add STEv1 modify header logic
  net/mlx5: DR, Add STEv1 action apply logic
  net/mlx5: DR, Add STEv1 setters and getters
  net/mlx5: DR, Allow native protocol support for HW STEv1
  net/mlx5: DR, Add HW STEv1 match logic
  net/mlx5: DR, Add match STEv1 structs to ifc
  net/mlx5: DR, Fix potential shift wrapping of 32-bit value
====================

Link: https://lore.kernel.org/r/20210130022618.317351-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f222a993 64f45c0f
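
As background for the function-pointer design the message describes, here is a
minimal, self-contained C sketch of the dispatch pattern (illustrative names
only, not the kernel code; the real structure is mlx5dr_ste_ctx, shown in the
dr_ste.h hunk below):

#include <stdint.h>
#include <stdio.h>

/* One ops table per STE format version; the generic layer only ever
 * calls through these pointers. */
struct ste_ops {
	uint64_t (*get_miss_addr)(const uint8_t *hw_ste);
};

/* Dummy decoders standing in for the real format-specific logic */
static uint64_t v0_get_miss_addr(const uint8_t *hw_ste) { return (uint64_t)hw_ste[0] << 6; }
static uint64_t v1_get_miss_addr(const uint8_t *hw_ste) { return (uint64_t)hw_ste[1] << 6; }

static const struct ste_ops ste_ops_v0 = { .get_miss_addr = v0_get_miss_addr };
static const struct ste_ops ste_ops_v1 = { .get_miss_addr = v1_get_miss_addr };

/* Indexed by the HW steering format version, like mlx5dr_ste_ctx_arr */
static const struct ste_ops *ste_ops_arr[] = { &ste_ops_v0, &ste_ops_v1 };

int main(void)
{
	uint8_t ste[64] = { 0x10, 0x20 };
	int version = 1; /* would come from the sw_format_ver capability */

	printf("miss addr: 0x%llx\n",
	       (unsigned long long)ste_ops_arr[version]->get_miss_addr(ste));
	return 0;
}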
drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -85,7 +85,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
#
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
caps->uplink_icm_address_tx =
MLX5_CAP64_ESW_FLOWTABLE(mdev,
sw_steering_uplink_icm_address_tx);
caps->sw_owner =
MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
sw_owner);
caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
if (!caps->sw_owner_v2)
caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
return 0;
}
@@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->nic_tx_allow_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
if (!caps->rx_sw_owner_v2)
caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
if (!caps->tx_sw_owner_v2)
caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
caps->hdr_modify_icm_addr =
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -4,6 +4,11 @@
#include <linux/mlx5/eswitch.h>
#include "dr_types.h"
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
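With token pasting, the same macro covers the rx, tx and fdb capability sets;
for instance, DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx) expands to (a sketch
for illustration):

((dmn)->info.caps.rx_sw_owner ||
 ((dmn)->info.caps.rx_sw_owner_v2 &&
  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

i.e. a device qualifies either through the legacy sw_owner bit or through
sw_owner_v2 on a steering format the driver knows (ConnectX-6 Dx or older).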
static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
@@ -187,6 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
return ret;
dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
@@ -229,18 +235,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (ret)
return ret;
if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
mlx5dr_err(dmn, "SW steering is not supported on this device\n");
return -EOPNOTSUPP;
}
ret = dr_domain_query_fdb_caps(mdev, dmn);
if (ret)
return ret;
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
if (!dmn->info.caps.rx_sw_owner)
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -249,7 +250,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
if (!dmn->info.caps.tx_sw_owner)
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -261,7 +262,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (!dmn->info.caps.eswitch_manager)
return -ENOTSUPP;
if (!dmn->info.caps.fdb_sw_owner)
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
return -ENOTSUPP;
dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -113,7 +113,8 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
static bool
@@ -135,7 +136,8 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED;
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
static bool
@@ -148,12 +150,14 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -30,7 +30,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
0, last_ste->hw_ste,
ste_info_last, send_list, true);
@@ -106,14 +106,19 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
int ret;
list_del(&ste_info->send_list);
/* Copy data to the STE - either the reduced size or just the control
 * section; the last 16B (mask) is already written to the HW.
 */
if (ste_info->size == DR_STE_SIZE_CTRL)
memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
else
memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
if (ret)
goto out;
/* Copy data to ste, only reduced size, the last 16B (mask)
* is already written to the hw.
*/
memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
out:
kfree(ste_info);
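
For reference, the DR_STE_SIZE_* constants this hunk relies on are defined in
dr_types.h; the 64B STE breaks down as follows:

/*
 * DR_STE_SIZE          64  whole STE
 * DR_STE_SIZE_CTRL     32  control section
 * DR_STE_SIZE_TAG      16  tag section
 * DR_STE_SIZE_MASK     16  bit-mask section
 * DR_STE_SIZE_REDUCED  48  DR_STE_SIZE - DR_STE_SIZE_MASK
 *
 * So a DR_STE_SIZE_CTRL copy touches only the first 32B, while a
 * DR_STE_SIZE_REDUCED copy touches everything except the trailing
 * 16B mask.
 */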
@@ -456,7 +461,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
ste_to_update = cur_htbl->pointing_ste;
}
mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
0, ste_to_update->hw_ste, ste_info,
update_list, false);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -431,6 +431,8 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
{
struct postsend_info send_info = {};
mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
send_info.write.addr = (uintptr_t)data;
send_info.write.length = size;
send_info.write.lkey = 0;
@@ -457,6 +459,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
if (ret)
return ret;
mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
/* Send the data 'iterations' times */
for (i = 0; i < iterations; i++) {
u32 ste_index = i * (byte_size / DR_STE_SIZE);
@@ -480,6 +484,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
mask, DR_STE_SIZE_MASK);
/* Only when we have a mask do we need to re-arrange the STE */
mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
data + (j * DR_STE_SIZE),
DR_STE_SIZE);
}
}
@@ -509,6 +517,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u32 byte_size = htbl->chunk->byte_size;
int iterations;
int num_stes;
u8 *copy_dst;
u8 *data;
int ret;
int i;
@@ -518,20 +527,22 @@
if (ret)
return ret;
for (i = 0; i < num_stes; i++) {
u8 *copy_dst;
/* Copy the same ste on the data buffer */
copy_dst = data + i * DR_STE_SIZE;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
if (update_hw_ste) {
/* Copy the reduced ste to hash table ste_arr */
if (update_hw_ste) {
/* Copy the reduced STE to hash table ste_arr */
for (i = 0; i < num_stes; i++) {
copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
/* Copy the same STE on the data buffer */
for (i = 0; i < num_stes; i++) {
copy_dst = data + i * DR_STE_SIZE;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
}
/* Send the data 'iterations' times */
for (i = 0; i < iterations; i++) {
u32 ste_index = i * (byte_size / DR_STE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -211,13 +211,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
* |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
*/
static void
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste *ste,
struct mlx5dr_ste *next_ste,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
{
struct mlx5dr_ste_htbl *next_miss_htbl;
u8 hw_ste[DR_STE_SIZE] = {};
int sb_idx;
next_miss_htbl = next_ste->htbl;
@@ -230,13 +234,19 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Move data from next into ste */
dr_ste_replace(ste, next_ste);
/* Copy all 64 hw_ste bytes */
memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
mlx5dr_ste_set_bit_mask(hw_ste,
nic_matcher->ste_builder[sb_idx].bit_mask);
/* Delete the htbl that contains the next_ste.
 * The original htbl keeps the same number of entries.
 */
mlx5dr_htbl_put(next_miss_htbl);
mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
0, ste->hw_ste,
mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
0, hw_ste,
ste_info_head,
send_ste_list,
true /* Copy data */);
@@ -264,7 +274,7 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
prev_ste->hw_ste, ste_info,
send_ste_list, true /* Copy data*/);
@@ -316,7 +326,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
stats_tbl);
} else {
/* First but not only entry in the list */
dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
dr_ste_replace_head_ste(nic_matcher, ste,
next_ste, &ste_info_head,
&send_ste_list, stats_tbl);
put_on_origin_table = false;
}
@@ -356,6 +367,13 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u32 ste_size)
{
if (ste_ctx->prepare_for_postsend)
ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
@@ -1127,7 +1145,7 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = NULL,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -160,8 +160,12 @@ struct mlx5dr_ste_ctx {
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
extern struct mlx5dr_ste_ctx ste_ctx_v0;
extern struct mlx5dr_ste_ctx ste_ctx_v1;
#endif /* _DR_STE_ */
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -248,8 +248,8 @@ static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
(MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32) << 26);
((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
return index << 6;
}
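
The casts matter because MLX5_GET() yields a 32-bit value: miss_address_39_32
is an 8-bit field, and 8 + 26 > 32, so shifting it as a u32 silently drops the
top two bits before the result widens to u64. A minimal standalone
demonstration (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t miss_address_39_32 = 0xff;  /* 8-bit field, all ones */
	uint64_t wrong = miss_address_39_32 << 26;           /* wraps in 32 bits */
	uint64_t right = (uint64_t)miss_address_39_32 << 26; /* keeps all 34 bits */

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}

This prints wrong=0xfc000000 and right=0x3fc000000.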
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c (new file)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
#include <linux/types.h>
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste.h"
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
DR_STE_V1_LU_TYPE_##lookup_type##_O)
enum dr_ste_v1_entry_format {
DR_STE_V1_TYPE_BWC_BYTE = 0x0,
DR_STE_V1_TYPE_BWC_DW = 0x1,
DR_STE_V1_TYPE_MATCH = 0x2,
};
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
enum {
DR_STE_V1_LU_TYPE_NOP = 0x0000,
DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
DR_STE_V1_LU_TYPE_GRE = 0x010d,
DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
};
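As a worked example of the 2B encoding: DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O =
0x0105 decomposes into definer mode 0x01 and definer index 0x05;
dr_ste_v1_set_lu_type() below writes lu_type >> 8 into entry_format and
lu_type & 0xFF into match_definer_ctx_idx.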
enum dr_ste_v1_header_anchors {
DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
};
enum dr_ste_v1_action_size {
DR_STE_ACTION_SINGLE_SZ = 4,
DR_STE_ACTION_DOUBLE_SZ = 8,
DR_STE_ACTION_TRIPLE_SZ = 12,
};
enum dr_ste_v1_action_insert_ptr_attr {
DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
};
enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_NOP = 0x00,
DR_STE_V1_ACTION_ID_COPY = 0x05,
DR_STE_V1_ACTION_ID_SET = 0x06,
DR_STE_V1_ACTION_ID_ADD = 0x07,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
/* used for special cases */
DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
};
enum {
DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
},
};
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
return index << 6;
}
static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
{
u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
return (mode << 8 | index);
}
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
u64 index = (icm_addr >> 5) | ht_size;
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
u32 ste_size)
{
u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
u8 *mask = tag + DR_STE_SIZE_TAG;
u8 tmp_tag[DR_STE_SIZE_TAG] = {};
if (ste_size == DR_STE_SIZE_CTRL)
return;
WARN_ON(ste_size != DR_STE_SIZE);
/* Backup tag */
memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
/* Swap mask and tag; both are the same size */
memcpy(tag, mask, DR_STE_SIZE_MASK);
memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
}
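
In other words (a sketch; section sizes as in DR_STE_SIZE_* above), the
driver keeps the STE in memory in one layout, but STEv1 hardware expects the
mask ahead of the tag, so the buffer is rewritten just before posting:

in memory:   [ CTRL 32B ][ TAG 16B  ][ MASK 16B ]
sent to HW:  [ CTRL 32B ][ MASK 16B ][ TAG 16B  ]

A CTRL-only write touches neither section, which is why the function returns
early for DR_STE_SIZE_CTRL.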
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_FLOW_TAG);
MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
/* The hardware expects the size here in words (2 bytes) */
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects the offset to the vlan header in words (2 bytes) */
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
start_offset, HDR_LEN_L2_MACS >> 1);
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
inline_data, vlan_hdr);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
/* The hardware expects the size here in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
u8 *frst_s_action,
u8 *scnd_d_action,
u32 reformat_id,
int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
DR_STE_HEADER_ANCHOR_IPV6_IPV4);
/* Encapsulate with given reformat ID */
MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
/* The hardware expects the size here in words (2 bytes) */
MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
DR_STE_HEADER_ANCHOR_INNER_MAC);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
u8 *s_action,
u16 decap_actions,
u32 decap_index)
{
MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_MODIFY_LIST);
MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
decap_actions);
MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
decap_index);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
u8 *s_action,
u16 num_of_actions,
u32 re_write_index)
{
MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_MODIFY_LIST);
MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
num_of_actions);
MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
re_write_index);
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
{
u8 *action;
(*added_stes)++;
*last_ste += DR_STE_SIZE;
dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
bool allow_encap = true;
if (action_type_set[DR_ACTION_TYP_CTR])
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->modify_actions,
attr->modify_index);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_encap = false;
}
if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
}
if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
dr_ste_v1_set_tx_encap(last_ste, action,
attr->reformat_id,
attr->reformat_size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_tx_encap_l3(last_ste,
action, d_action,
attr->reformat_id,
attr->reformat_size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
bool allow_modify_hdr = true;
bool allow_ctr = true;
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
dr_ste_v1_set_rx_decap_l3(last_ste, action,
attr->decap_actions,
attr->decap_index);
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->decap_actions,
attr->decap_index);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
dr_ste_v1_set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
allow_ctr = false;
}
if (action_type_set[DR_ACTION_TYP_TAG]) {
if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
allow_ctr = true;
}
dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
!allow_modify_hdr) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
allow_ctr = false;
}
dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
/* Modify header and decapsulation must use different STEs */
if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
allow_ctr = true;
}
dr_ste_v1_set_rewrite_actions(last_ste, action,
attr->modify_actions,
attr->modify_index);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_CTR]) {
/* Counter action is set after decap to exclude the decapped header */
if (!allow_ctr) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
allow_ctr = false;
}
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static void dr_ste_v1_set_action_set(u8 *d_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
static void dr_ste_v1_set_action_add(u8 *d_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
static void dr_ste_v1_set_action_copy(u8 *d_action,
u8 dst_hw_field,
u8 dst_shifter,
u8 dst_len,
u8 src_hw_field,
u8 src_shifter)
{
dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
#define DR_STE_DECAP_L3_ACTION_NUM 8
#define DR_STE_L2_HDR_MAX_SZ 20
static int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num)
{
u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
void *data_ptr = padded_data;
u16 used_actions = 0;
u32 inline_data_sz;
u32 i;
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
return -EINVAL;
memcpy(padded_data, data, data_sz);
/* Remove L2L3 outer headers */
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
used_actions++; /* Remove and NOP are a single double action */
inline_data_sz =
MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
/* Add the new header inline + 2 extra bytes */
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
void *addr_inline;
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects the offset here in words (2 bytes) */
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
i * 2);
/* Copy bytes one by one to avoid endianness problem */
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
hw_action, inline_data);
memcpy(addr_inline, data_ptr, inline_data_sz);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
data_ptr += inline_data_sz;
used_actions++;
}
/* Remove 2 extra bytes */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
/* The hardware expects here size in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
used_actions++;
*used_hw_action_num = used_actions;
return 0;
}
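
A worked example, assuming the inline_data field is 4 bytes wide (what
MLX5_FLD_SZ_BYTES() computes here): restoring a standard 14B L2 header
(6B DMAC + 6B SMAC + 2B ethertype) takes 14 / 4 + 1 = 4 inline inserts, which
write 16B, i.e. the 2 extra bytes that the final remove-by-size action strips.
Total: 1 remove-to-header + 4 inserts + 1 remove-by-size = 6 double actions,
within the DR_STE_DECAP_L3_ACTION_NUM = 8 budget checked at entry.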
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);
if (mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
} else if (mask->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
}
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else if (spec->ip_version) {
return -EINVAL;
}
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
return 0;
}
static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
}
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
return 0;
}
static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
}
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
return 0;
}
static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
}
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
spec->tcp_flags = 0;
}
return 0;
}
static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
}
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
if (inner) {
if (misc_mask->inner_second_cvlan_tag ||
misc_mask->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
misc_mask->inner_second_cvlan_tag = 0;
misc_mask->inner_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_vlan_id, misc_mask, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_cfi, misc_mask, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_priority, misc_mask, inner_second_prio);
} else {
if (misc_mask->outer_second_cvlan_tag ||
misc_mask->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
misc_mask->outer_second_cvlan_tag = 0;
misc_mask->outer_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_vlan_id, misc_mask, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_cfi, misc_mask, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
second_priority, misc_mask, outer_second_prio);
}
}
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
bool inner, u8 *tag)
{
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else if (spec->ip_version) {
return -EINVAL;
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (inner) {
if (misc_spec->inner_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->inner_second_cvlan_tag = 0;
} else if (misc_spec->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->inner_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
} else {
if (misc_spec->outer_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->outer_second_cvlan_tag = 0;
} else if (misc_spec->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->outer_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
}
return 0;
}
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
}
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
}
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
l2_tunneling_network_id, (misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
}
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
(misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else if (spec->ip_version) {
return -EINVAL;
}
return 0;
}
static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
}
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
return 0;
}
static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
}
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);
if (sb->inner)
DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
else
DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
spec->tcp_flags = 0;
}
return 0;
}
static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
}
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
if (sb->inner)
DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
else
DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
return 0;
}
static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
}
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);
DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
return 0;
}
static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
}
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
misc2, outer_first_mpls_over_gre_label);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
misc2, outer_first_mpls_over_gre_exp);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
misc2, outer_first_mpls_over_gre_s_bos);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
misc2, outer_first_mpls_over_gre_ttl);
} else {
DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
misc2, outer_first_mpls_over_udp_label);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
misc2, outer_first_mpls_over_udp_exp);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
misc2, outer_first_mpls_over_udp_s_bos);
DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
misc2, outer_first_mpls_over_udp_ttl);
}
return 0;
}
static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
}
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
u32 *icmp_header_data;
u8 *icmp_type;
u8 *icmp_code;
if (is_ipv4) {
icmp_header_data = &misc3->icmpv4_header_data;
icmp_type = &misc3->icmpv4_type;
icmp_code = &misc3->icmpv4_code;
} else {
icmp_header_data = &misc3->icmpv6_header_data;
icmp_type = &misc3->icmpv6_type;
icmp_code = &misc3->icmpv6_code;
}
MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
*icmp_header_data = 0;
*icmp_type = 0;
*icmp_code = 0;
return 0;
}
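/* Note the int return: the shared build_icmp_init callback is allowed
 * to fail (the STEv0 flavor depends on flex parser capabilities); the
 * v1 flavor always succeeds.
 */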
static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
return 0;
}
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc2, metadata_reg_a);
return 0;
}
static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
}
static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
} else {
DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
}
return 0;
}
static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
}
static int
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
outer_vxlan_gpe_flags);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_next_protocol, misc3,
outer_vxlan_gpe_next_protocol);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_vni, misc3,
outer_vxlan_gpe_vni);
return 0;
}
static void
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
}
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_oam, misc, geneve_oam);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_opt_len, misc, geneve_opt_len);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_vni, misc, geneve_vni);
return 0;
}
static void
dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
}
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
return 0;
}
static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
}
static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
return 0;
}
static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
}
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
}
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
caps = &dmn->info.caps;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
caps = &dmn->peer_dmn->info.caps;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
caps = &dmn->info.caps;
}
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
return 0;
vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
return -EINVAL;
}
if (vport_cap->vport_gvmi)
MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
misc->source_port = 0;
return 0;
}
static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
.build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
.build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
.build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
.build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
.build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
.build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
.build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
.build_mpls_init = &dr_ste_v1_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
.build_icmp_init = &dr_ste_v1_build_icmp_init,
.build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
/* Getters and Setters */
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
.modify_field_arr = dr_ste_v1_action_modify_field_arr,
.set_action_set = &dr_ste_v1_set_action_set,
.set_action_add = &dr_ste_v1_set_action_add,
.set_action_copy = &dr_ste_v1_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
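With ste_ctx_v1 in place, the generic layer can select the STE format at domain creation time from the device's steering format version. A plausible selection helper, sketched here for clarity (the array name and bounds check are assumptions based on this series' description, not quoted from the patch):

static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_sketch(u8 version)
{
	/* refuse formats newer than this code knows how to drive */
	if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return NULL;

	return mlx5dr_ste_ctx_arr[version];
}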
......@@ -666,7 +666,8 @@ struct mlx5dr_esw_caps {
u64 drop_icm_address_tx;
u64 uplink_icm_address_rx;
u64 uplink_icm_address_tx;
bool sw_owner;
u8 sw_owner:1;
u8 sw_owner_v2:1;
};
struct mlx5dr_cmd_vport_cap {
......@@ -699,6 +700,9 @@ struct mlx5dr_cmd_caps {
bool rx_sw_owner;
bool tx_sw_owner;
bool fdb_sw_owner;
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps;
......@@ -1072,6 +1076,9 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
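/* Hook for HW-specific byte-stream fixups applied just before an STE is
 * posted to ICM; STEv1 needs this, while STEv0 entries are sent as-is.
 */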
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u32 ste_size);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
#ifndef MLX5_IFC_DR_STE_V1_H
#define MLX5_IFC_DR_STE_V1_H
enum mlx5_ifc_ste_v1_modify_hdr_offset {
MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20,
};
struct mlx5_ifc_ste_single_action_flow_tag_v1_bits {
u8 action_id[0x8];
u8 flow_tag[0x18];
};
struct mlx5_ifc_ste_single_action_modify_list_v1_bits {
u8 action_id[0x8];
u8 num_of_modify_actions[0x8];
u8 modify_actions_ptr[0x10];
};
struct mlx5_ifc_ste_single_action_remove_header_v1_bits {
u8 action_id[0x8];
u8 reserved_at_8[0x2];
u8 start_anchor[0x6];
u8 reserved_at_10[0x2];
u8 end_anchor[0x6];
u8 reserved_at_18[0x4];
u8 decap[0x1];
u8 vni_to_cqe[0x1];
u8 qos_profile[0x2];
};
struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits {
u8 action_id[0x8];
u8 reserved_at_8[0x2];
u8 start_anchor[0x6];
u8 outer_l4_remove[0x1];
u8 reserved_at_11[0x1];
u8 start_offset[0x7];
u8 reserved_at_18[0x1];
u8 remove_size[0x6];
};
struct mlx5_ifc_ste_double_action_copy_v1_bits {
u8 action_id[0x8];
u8 destination_dw_offset[0x8];
u8 reserved_at_10[0x2];
u8 destination_left_shifter[0x6];
u8 reserved_at_18[0x2];
u8 destination_length[0x6];
u8 reserved_at_20[0x8];
u8 source_dw_offset[0x8];
u8 reserved_at_30[0x2];
u8 source_right_shifter[0x6];
u8 reserved_at_38[0x8];
};
struct mlx5_ifc_ste_double_action_set_v1_bits {
u8 action_id[0x8];
u8 destination_dw_offset[0x8];
u8 reserved_at_10[0x2];
u8 destination_left_shifter[0x6];
u8 reserved_at_18[0x2];
u8 destination_length[0x6];
u8 inline_data[0x20];
};
struct mlx5_ifc_ste_double_action_add_v1_bits {
u8 action_id[0x8];
u8 destination_dw_offset[0x8];
u8 reserved_at_10[0x2];
u8 destination_left_shifter[0x6];
u8 reserved_at_18[0x2];
u8 destination_length[0x6];
u8 add_value[0x20];
};
struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits {
u8 action_id[0x8];
u8 reserved_at_8[0x2];
u8 start_anchor[0x6];
u8 start_offset[0x7];
u8 reserved_at_17[0x9];
u8 inline_data[0x20];
};
struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
u8 action_id[0x8];
u8 reserved_at_8[0x2];
u8 start_anchor[0x6];
u8 start_offset[0x7];
u8 size[0x6];
u8 attributes[0x3];
u8 pointer[0x20];
};
struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits {
u8 action_id[0x8];
u8 modify_actions_pattern_pointer[0x18];
u8 number_of_modify_actions[0x8];
u8 modify_actions_argument_pointer[0x18];
};
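Every action layout above leads with an 8-bit action_id, which lets the v1 apply logic pack single (one dword) and double (two dword) actions back to back in the STE's action area. A hedged sketch of filling one "double set" action with MLX5_SET (the action-id value is a placeholder; the real IDs are defined in dr_ste_v1.c):

/* illustrative opcode value only; see dr_ste_v1.c for the real IDs */
#define SKETCH_ACTION_ID_SET 0x1

static void sketch_fill_set_action(u8 *d_action, u8 dw_offset, u8 shifter,
				   u8 length, u32 data)
{
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, SKETCH_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, dw_offset);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}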
struct mlx5_ifc_ste_match_bwc_v1_bits {
u8 entry_format[0x8];
u8 counter_id[0x18];
u8 miss_address_63_48[0x10];
u8 match_definer_ctx_idx[0x8];
u8 miss_address_39_32[0x8];
u8 miss_address_31_6[0x1a];
u8 reserved_at_5a[0x1];
u8 match_polarity[0x1];
u8 reparse[0x1];
u8 reserved_at_5d[0x3];
u8 next_table_base_63_48[0x10];
u8 hash_definer_ctx_idx[0x8];
u8 next_table_base_39_32_size[0x8];
u8 next_table_base_31_5_size[0x1b];
u8 hash_type[0x2];
u8 hash_after_actions[0x1];
u8 reserved_at_9e[0x2];
u8 byte_mask[0x10];
u8 next_entry_format[0x1];
u8 mask_mode[0x1];
u8 gvmi[0xe];
u8 action[0x40];
};
struct mlx5_ifc_ste_mask_and_match_v1_bits {
u8 entry_format[0x8];
u8 counter_id[0x18];
u8 miss_address_63_48[0x10];
u8 match_definer_ctx_idx[0x8];
u8 miss_address_39_32[0x8];
u8 miss_address_31_6[0x1a];
u8 reserved_at_5a[0x1];
u8 match_polarity[0x1];
u8 reparse[0x1];
u8 reserved_at_5d[0x3];
u8 next_table_base_63_48[0x10];
u8 hash_definer_ctx_idx[0x8];
u8 next_table_base_39_32_size[0x8];
u8 next_table_base_31_5_size[0x1b];
u8 hash_type[0x2];
u8 hash_after_actions[0x1];
u8 reserved_at_9e[0x2];
u8 action[0x60];
};
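The two match entry formats differ only in their tail: ste_match_bwc_v1 spends one dword on byte_mask/next_entry_format/mask_mode/gvmi and keeps two dwords of action space, while ste_mask_and_match_v1 gives all three dwords to actions. Both must stay exactly 32 bytes, the control half of the 64-byte STE, with the match tag presumably occupying the other half; a compile-time sanity check could read:

static_assert(MLX5_ST_SZ_BYTES(ste_match_bwc_v1) == 32);
static_assert(MLX5_ST_SZ_BYTES(ste_mask_and_match_v1) == 32);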
struct mlx5_ifc_ste_eth_l2_src_v1_bits {
u8 reserved_at_0[0x1];
u8 sx_sniffer[0x1];
u8 functional_loopback[0x1];
u8 ip_fragmented[0x1];
u8 qp_type[0x2];
u8 encapsulation_type[0x2];
u8 port[0x2];
u8 l3_type[0x2];
u8 l4_type[0x2];
u8 first_vlan_qualifier[0x2];
u8 first_priority[0x3];
u8 first_cfi[0x1];
u8 first_vlan_id[0xc];
u8 smac_47_16[0x20];
u8 smac_15_0[0x10];
u8 l3_ethertype[0x10];
u8 reserved_at_60[0x6];
u8 tcp_syn[0x1];
u8 reserved_at_67[0x3];
u8 force_loopback[0x1];
u8 l2_ok[0x1];
u8 l3_ok[0x1];
u8 l4_ok[0x1];
u8 second_vlan_qualifier[0x2];
u8 second_priority[0x3];
u8 second_cfi[0x1];
u8 second_vlan_id[0xc];
};
struct mlx5_ifc_ste_eth_l2_dst_v1_bits {
u8 reserved_at_0[0x1];
u8 sx_sniffer[0x1];
u8 functional_lb[0x1];
u8 ip_fragmented[0x1];
u8 qp_type[0x2];
u8 encapsulation_type[0x2];
u8 port[0x2];
u8 l3_type[0x2];
u8 l4_type[0x2];
u8 first_vlan_qualifier[0x2];
u8 first_priority[0x3];
u8 first_cfi[0x1];
u8 first_vlan_id[0xc];
u8 dmac_47_16[0x20];
u8 dmac_15_0[0x10];
u8 l3_ethertype[0x10];
u8 reserved_at_60[0x6];
u8 tcp_syn[0x1];
u8 reserved_at_67[0x3];
u8 force_lb[0x1];
u8 l2_ok[0x1];
u8 l3_ok[0x1];
u8 l4_ok[0x1];
u8 second_vlan_qualifier[0x2];
u8 second_priority[0x3];
u8 second_cfi[0x1];
u8 second_vlan_id[0xc];
};
struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits {
u8 dmac_47_16[0x20];
u8 smac_47_16[0x20];
u8 dmac_15_0[0x10];
u8 reserved_at_50[0x2];
u8 functional_lb[0x1];
u8 reserved_at_53[0x5];
u8 port[0x2];
u8 l3_type[0x2];
u8 reserved_at_5c[0x2];
u8 first_vlan_qualifier[0x2];
u8 first_priority[0x3];
u8 first_cfi[0x1];
u8 first_vlan_id[0xc];
u8 smac_15_0[0x10];
};
struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits {
u8 source_address[0x20];
u8 destination_address[0x20];
u8 source_port[0x10];
u8 destination_port[0x10];
u8 reserved_at_60[0x4];
u8 l4_ok[0x1];
u8 l3_ok[0x1];
u8 fragmented[0x1];
u8 tcp_ns[0x1];
u8 tcp_cwr[0x1];
u8 tcp_ece[0x1];
u8 tcp_urg[0x1];
u8 tcp_ack[0x1];
u8 tcp_psh[0x1];
u8 tcp_rst[0x1];
u8 tcp_syn[0x1];
u8 tcp_fin[0x1];
u8 dscp[0x6];
u8 ecn[0x2];
u8 protocol[0x8];
};
struct mlx5_ifc_ste_eth_l2_tnl_v1_bits {
u8 l2_tunneling_network_id[0x20];
u8 dmac_47_16[0x20];
u8 dmac_15_0[0x10];
u8 l3_ethertype[0x10];
u8 reserved_at_60[0x3];
u8 ip_fragmented[0x1];
u8 reserved_at_64[0x2];
u8 encp_type[0x2];
u8 reserved_at_68[0x2];
u8 l3_type[0x2];
u8 l4_type[0x2];
u8 first_vlan_qualifier[0x2];
u8 first_priority[0x3];
u8 first_cfi[0x1];
u8 first_vlan_id[0xc];
};
struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits {
u8 identification[0x10];
u8 flags[0x3];
u8 fragment_offset[0xd];
u8 total_length[0x10];
u8 checksum[0x10];
u8 version[0x4];
u8 ihl[0x4];
u8 time_to_live[0x8];
u8 reserved_at_50[0x10];
u8 reserved_at_60[0x1c];
u8 voq_internal_prio[0x4];
};
struct mlx5_ifc_ste_eth_l4_v1_bits {
u8 ipv6_version[0x4];
u8 reserved_at_4[0x4];
u8 dscp[0x6];
u8 ecn[0x2];
u8 ipv6_hop_limit[0x8];
u8 protocol[0x8];
u8 src_port[0x10];
u8 dst_port[0x10];
u8 first_fragment[0x1];
u8 reserved_at_41[0xb];
u8 flow_label[0x14];
u8 tcp_data_offset[0x4];
u8 l4_ok[0x1];
u8 l3_ok[0x1];
u8 fragmented[0x1];
u8 tcp_ns[0x1];
u8 tcp_cwr[0x1];
u8 tcp_ece[0x1];
u8 tcp_urg[0x1];
u8 tcp_ack[0x1];
u8 tcp_psh[0x1];
u8 tcp_rst[0x1];
u8 tcp_syn[0x1];
u8 tcp_fin[0x1];
u8 ipv6_paylen[0x10];
};
struct mlx5_ifc_ste_eth_l4_misc_v1_bits {
u8 window_size[0x10];
u8 urgent_pointer[0x10];
u8 ack_num[0x20];
u8 seq_num[0x20];
u8 length[0x10];
u8 checksum[0x10];
};
struct mlx5_ifc_ste_mpls_v1_bits {
u8 reserved_at_0[0x15];
u8 mpls_ok[0x1];
u8 mpls4_s_bit[0x1];
u8 mpls4_qualifier[0x1];
u8 mpls3_s_bit[0x1];
u8 mpls3_qualifier[0x1];
u8 mpls2_s_bit[0x1];
u8 mpls2_qualifier[0x1];
u8 mpls1_s_bit[0x1];
u8 mpls1_qualifier[0x1];
u8 mpls0_s_bit[0x1];
u8 mpls0_qualifier[0x1];
u8 mpls0_label[0x14];
u8 mpls0_exp[0x3];
u8 mpls0_s_bos[0x1];
u8 mpls0_ttl[0x8];
u8 mpls1_label[0x20];
u8 mpls2_label[0x20];
};
struct mlx5_ifc_ste_gre_v1_bits {
u8 gre_c_present[0x1];
u8 reserved_at_1[0x1];
u8 gre_k_present[0x1];
u8 gre_s_present[0x1];
u8 strict_src_route[0x1];
u8 recur[0x3];
u8 flags[0x5];
u8 version[0x3];
u8 gre_protocol[0x10];
u8 reserved_at_20[0x20];
u8 gre_key_h[0x18];
u8 gre_key_l[0x8];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_ste_src_gvmi_qp_v1_bits {
u8 loopback_synd[0x8];
u8 reserved_at_8[0x7];
u8 functional_lb[0x1];
u8 source_gvmi[0x10];
u8 force_lb[0x1];
u8 reserved_at_21[0x1];
u8 source_is_requestor[0x1];
u8 reserved_at_23[0x5];
u8 source_qp[0x18];
u8 reserved_at_40[0x20];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_ste_icmp_v1_bits {
u8 icmp_payload_data[0x20];
u8 icmp_header_data[0x20];
u8 icmp_type[0x8];
u8 icmp_code[0x8];
u8 reserved_at_50[0x10];
u8 reserved_at_60[0x20];
};
#endif /* MLX5_IFC_DR_STE_V1_H */
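All of these layouts are consumed through the standard mlx5 ifc accessors, so a tag can be decoded the same way it was encoded. An illustrative (non-driver) helper that reads back the fields dr_ste_v1_build_icmp_tag wrote:

static void sketch_dump_icmp_tag(void *tag)
{
	u32 hdr  = MLX5_GET(ste_icmp_v1, tag, icmp_header_data);
	u8  type = MLX5_GET(ste_icmp_v1, tag, icmp_type);
	u8  code = MLX5_GET(ste_icmp_v1, tag, icmp_code);

	pr_debug("icmp tag: type %u code %u header_data 0x%x\n", type, code, hdr);
}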
......@@ -124,7 +124,10 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
MLX5_STEERING_FORMAT_CONNECTX_6DX));
}
/* buddy functions & structure */
......
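For reference, the comparison in mlx5dr_is_supported() relies on steering_format_version growing monotonically with newer devices; at this point the enum in mlx5_ifc.h carries two values (shown here for context):

enum {
	MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
};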