Commit db471ed9 authored by David S. Miller

Merge tag 'mlx5-updates-2021-01-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-01-05

SW steering, Refactor to have a device specific STE layer below dr_ste

This series introduces some improvements and refactoring by adding a new layer
below dr_ste to allow support for different device formats.

It adds a struct of device-specific callbacks for the STE layer below dr_ste.
Each device will implement its HW-specific functions, and the common logic
in the DR code will access these functions through the new ste_ctx API.

The ConnectX-5 style steering format is called STE_v0.
The next patch series will bring the ConnectX-6 style format, STE_v1.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents da2c3ee1 4781df92
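
For orientation before the diff: the refactoring is a classic ops-table pattern. What follows is a minimal, self-contained sketch of that pattern, not the kernel's actual definitions. The callback names are borrowed from the ste_ctx-> calls visible in the diff (the real struct is declared in the series' new dr_ste.h); the types, the stub bodies, and the sw_format_ver value are simplified assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* One instance per STE format (STE_v0 now, STE_v1 in the next series).
 * The real struct also carries set_actions_rx/tx, the modify-header
 * conversion table, getters, and more.
 */
struct ste_ctx {
	void (*ste_init)(u8 *hw_ste, u16 lu_type, u8 entry_type, u16 gvmi);
	void (*set_miss_addr)(u8 *hw_ste, u64 miss_addr);
	void (*set_hit_addr)(u8 *hw_ste, u64 icm_addr, u32 ht_size);
};

/* STE_v0 (ConnectX-5 format) stubs; the real ones write the HW layout. */
static void v0_ste_init(u8 *hw_ste, u16 lu_type, u8 entry_type, u16 gvmi)
{
	(void)hw_ste;
	printf("v0 init: lu_type=%u entry_type=%u gvmi=%u\n",
	       (unsigned)lu_type, (unsigned)entry_type, (unsigned)gvmi);
}

static void v0_set_miss_addr(u8 *hw_ste, u64 miss_addr)
{
	(void)hw_ste;
	printf("v0 miss addr: 0x%llx\n", (unsigned long long)miss_addr);
}

static void v0_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	(void)hw_ste;
	printf("v0 hit addr: 0x%llx, ht size: %u\n",
	       (unsigned long long)icm_addr, (unsigned)ht_size);
}

static const struct ste_ctx ste_ctx_v0 = {
	.ste_init      = v0_ste_init,
	.set_miss_addr = v0_set_miss_addr,
	.set_hit_addr  = v0_set_hit_addr,
};

/* Common code resolves the context once per domain and only ever calls
 * through it (compare mlx5dr_ste_get_ctx() in the dr_domain.c hunk below).
 */
static const struct ste_ctx *ste_get_ctx(int sw_format_ver)
{
	return sw_format_ver == 0 ? &ste_ctx_v0 : NULL;
}

int main(void)
{
	u8 hw_ste[64] = { 0 };
	const struct ste_ctx *ctx = ste_get_ctx(0);

	if (!ctx)
		return 1; /* cf. the -EOPNOTSUPP path added to dr_domain.c */

	ctx->ste_init(hw_ste, 1, 0, 42);
	ctx->set_miss_addr(hw_ste, 0x1000);
	ctx->set_hit_addr(hw_ste, 0x2000, 8);
	return 0;
}

The diff below performs exactly this indirection: helpers such as mlx5dr_ste_set_miss_addr() become thin wrappers that forward to dmn->ste_ctx, and the new steering/dr_ste_v0.o supplies the first (ConnectX-5 format) implementation.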
@@ -83,5 +83,6 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
@@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
};
struct dr_action_modify_field_conv {
u16 hw_field;
u8 start;
u8 end;
u8 l3_type;
u8 l4_type;
};
static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
.l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
.l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
},
};
#define MAX_VLANS 2
struct dr_action_vlan_info {
int count;
u32 headers[MAX_VLANS];
};
struct dr_action_apply_attr {
u32 modify_index;
u16 modify_actions;
u32 decap_index;
u16 decap_actions;
u8 decap_with_vlan:1;
u64 final_icm_addr;
u32 flow_tag;
u32 ctr_id;
u16 gvmi;
u16 hit_gvmi;
u32 reformat_id;
u32 reformat_size;
struct dr_action_vlan_info vlans;
};
static int
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
enum mlx5dr_action_type *action_type)
@@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
return 0;
}
static void dr_actions_init_next_ste(u8 **last_ste,
u32 *added_stes,
enum mlx5dr_ste_entry_type entry_type,
u16 gvmi)
{
(*added_stes)++;
*last_ste += DR_STE_SIZE;
mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
}
static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct dr_action_apply_attr *attr,
u32 *added_stes)
{
bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
/* We want to make sure the modify header comes before L2
* encapsulation. The reason is that we support
* modify headers for outer headers only.
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
mlx5dr_ste_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
}
if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
dr_actions_init_next_ste(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
attr->gvmi);
mlx5dr_ste_set_tx_push_vlan(last_ste,
attr->vlans.headers[i],
encap);
}
}
if (encap) {
/* Modify header and encapsulation require different STEs,
* since the modify header STE format doesn't support the
* encapsulation tunneling_action.
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
action_type_set[DR_ACTION_TYP_PUSH_VLAN])
dr_actions_init_next_ste(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
attr->gvmi);
mlx5dr_ste_set_tx_encap(last_ste,
attr->reformat_id,
attr->reformat_size,
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
/* Whenever prio_tag_required is enabled, we can be sure that the
* previous table (ACL) already pushed a vlan to our packet,
* and due to a HW limitation we need to set this bit, otherwise
* push vlan + reformat will not work.
*/
if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
mlx5dr_ste_set_go_back_bit(last_ste);
}
if (action_type_set[DR_ACTION_TYP_CTR])
mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
}
static void dr_actions_apply_rx(u8 *action_type_set,
u8 *last_ste,
struct dr_action_apply_attr *attr,
u32 *added_stes)
{
if (action_type_set[DR_ACTION_TYP_CTR])
mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
mlx5dr_ste_set_rewrite_actions(last_ste,
attr->decap_actions,
attr->decap_index);
}
if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
mlx5dr_ste_set_rx_decap(last_ste);
if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (i ||
action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
dr_actions_init_next_ste(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
attr->gvmi);
mlx5dr_ste_set_rx_pop_vlan(last_ste);
}
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
dr_actions_init_next_ste(&last_ste,
added_stes,
MLX5DR_STE_TYPE_MODIFY_PKT,
attr->gvmi);
else
mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
mlx5dr_ste_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
}
if (action_type_set[DR_ACTION_TYP_TAG]) {
if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
dr_actions_init_next_ste(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
attr->gvmi);
mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
}
}
/* Apply the actions on the rule STE array starting from the last_ste.
* Actions might require more than one STE; new_num_stes will return
* the new size of the STE array for the rule with its actions.
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
enum mlx5dr_ste_entry_type ste_type,
u8 *action_type_set,
u8 *last_ste,
struct dr_action_apply_attr *attr,
struct mlx5dr_ste_actions_attr *attr,
u32 *new_num_stes)
{
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
if (ste_type == MLX5DR_STE_TYPE_RX)
dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
last_ste, attr, &added_stes);
else
dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
last_ste, attr, &added_stes);
last_ste += added_stes * DR_STE_SIZE;
*new_num_stes += added_stes;
mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static enum dr_action_domain
@@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
struct mlx5dr_ste_actions_attr attr = {};
struct mlx5dr_action *dest_action = NULL;
u32 state = DR_ACTION_STATE_NO_ACTION;
struct dr_action_apply_attr attr = {};
enum dr_action_domain action_domain;
bool recalc_cs_required = false;
u8 *last_ste;
@@ -756,12 +468,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
break;
case DR_ACTION_TYP_POP_VLAN:
max_actions_type = MAX_VLANS;
max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
max_actions_type = MAX_VLANS;
if (attr.vlans.count == MAX_VLANS)
max_actions_type = MLX5DR_MAX_VLANS;
if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
@@ -817,132 +529,6 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
return -EINVAL;
}
#define CVLAN_ETHERTYPE 0x8100
#define SVLAN_ETHERTYPE 0x88a8
#define HDR_LEN_L2_ONLY 14
#define HDR_LEN_L2_VLAN 18
#define REWRITE_HW_ACTION_NUM 6
static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action,
void *data, size_t data_sz)
{
struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
u64 ops[REWRITE_HW_ACTION_NUM] = {};
u32 hdr_fld_4b;
u16 hdr_fld_2b;
u16 vlan_type;
bool vlan;
int i = 0;
int ret;
vlan = (data_sz != HDR_LEN_L2_ONLY);
/* dmac_47_16 */
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, ops + i,
destination_length, 0);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 16);
hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
MLX5_SET(dr_action_hw_set, ops + i,
inline_data, hdr_fld_4b);
i++;
/* smac_47_16 */
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, ops + i,
destination_length, 0);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 16);
hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
MLX5_SET(dr_action_hw_set, ops + i,
inline_data, hdr_fld_4b);
i++;
/* dmac_15_0 */
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, ops + i,
destination_length, 16);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 0);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
MLX5_SET(dr_action_hw_set, ops + i,
inline_data, hdr_fld_2b);
i++;
/* ethertype + (optional) vlan */
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 32);
if (!vlan) {
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
} else {
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
}
i++;
/* smac_15_0 */
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, ops + i,
destination_length, 16);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 0);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
MLX5_SET(dr_action_hw_set, ops + i,
inline_data, hdr_fld_2b);
i++;
if (vlan) {
MLX5_SET(dr_action_hw_set, ops + i,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
MLX5_SET(dr_action_hw_set, ops + i,
inline_data, hdr_fld_2b);
MLX5_SET(dr_action_hw_set, ops + i,
destination_length, 16);
MLX5_SET(dr_action_hw_set, ops + i,
destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
MLX5_SET(dr_action_hw_set, ops + i,
destination_left_shifter, 0);
i++;
}
action->rewrite.data = (void *)ops;
action->rewrite.num_of_actions = i;
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
return ret;
}
return 0;
}
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
@@ -1217,21 +803,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
return -EINVAL;
u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
int ret;
ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
data, data_sz,
hw_actions,
ACTION_CACHE_LINE_SIZE,
&action->rewrite.num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
return ret;
}
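/* The device-specific STE layer built the decap L3 action list above;
* now allocate an ICM chunk for it, derive the rewrite index from the
* chunk's offset within the modify-header ICM area, and post it to HW.
*/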
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
DR_CHUNK_SIZE_8);
if (!action->rewrite.chunk)
if (!action->rewrite.chunk) {
mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
}
action->rewrite.data = (void *)hw_actions;
action->rewrite.index = (action->rewrite.chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite.chunk);
return ret;
}
@@ -1243,6 +842,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
}
#define CVLAN_ETHERTYPE 0x8100
#define SVLAN_ETHERTYPE 0x88a8
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
{
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
@@ -1315,31 +917,13 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
return NULL;
}
static const struct dr_action_modify_field_conv *
dr_action_modify_get_hw_info(u16 sw_field)
{
const struct dr_action_modify_field_conv *hw_action_info;
if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
goto not_found;
hw_action_info = &dr_action_conv_arr[sw_field];
if (!hw_action_info->end && !hw_action_info->start)
goto not_found;
return hw_action_info;
not_found:
return NULL;
}
static int
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_hw_info)
const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 max_length;
u16 sw_field;
u32 data;
@@ -1349,7 +933,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
@@ -1357,20 +941,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
max_length = hw_action_info->end - hw_action_info->start + 1;
MLX5_SET(dr_action_hw_set, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
hw_action_info->start);
/* The PRM defines that a length of zero means a length of 32 bits */
MLX5_SET(dr_action_hw_set, hw_action, destination_length,
max_length == 32 ? 0 : max_length);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
mlx5dr_ste_set_action_add(dmn->ste_ctx,
hw_action,
hw_action_info->hw_field,
hw_action_info->start,
max_length,
data);
*ret_hw_info = hw_action_info;
@@ -1381,9 +957,9 @@ static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_hw_info)
const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
const struct dr_action_modify_field_conv *hw_action_info;
const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
@@ -1395,7 +971,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
hw_action_info = dr_action_modify_get_hw_info(sw_field);
hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
@@ -1411,19 +987,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
return -EINVAL;
}
MLX5_SET(dr_action_hw_set, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
hw_action_info->hw_field);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
hw_action_info->start + offset);
MLX5_SET(dr_action_hw_set, hw_action, destination_length,
length == 32 ? 0 : length);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
mlx5dr_ste_set_action_set(dmn->ste_ctx,
hw_action,
hw_action_info->hw_field,
hw_action_info->start + offset,
length,
data);
*ret_hw_info = hw_action_info;
@@ -1434,12 +1003,12 @@ static int
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_dst_hw_info,
const struct dr_action_modify_field_conv **ret_src_hw_info)
const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
const struct dr_action_modify_field_conv *hw_dst_action_info;
const struct dr_action_modify_field_conv *hw_src_action_info;
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
@@ -1450,8 +1019,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
hw_src_action_info = dr_action_modify_get_hw_info(src_field);
hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
@@ -1471,23 +1040,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
return -EINVAL;
}
MLX5_SET(dr_action_hw_copy, hw_action,
opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
hw_dst_action_info->hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
hw_dst_action_info->start + dst_offset);
MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
length == 32 ? 0 : length);
MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
hw_src_action_info->hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
hw_src_action_info->start + dst_offset);
mlx5dr_ste_set_action_copy(dmn->ste_ctx,
hw_action,
hw_dst_action_info->hw_field,
hw_dst_action_info->start + dst_offset,
length,
hw_src_action_info->hw_field,
hw_src_action_info->start + src_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
@@ -1499,8 +1058,8 @@ static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
const struct dr_action_modify_field_conv **ret_dst_hw_info,
const struct dr_action_modify_field_conv **ret_src_hw_info)
const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 action;
int ret;
@@ -1677,15 +1236,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 *num_hw_actions,
bool *modify_ttl)
{
const struct dr_action_modify_field_conv *hw_dst_action_info;
const struct dr_action_modify_field_conv *hw_src_action_info;
u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
u16 hw_field = 0;
u32 l3_type = 0;
u32 l4_type = 0;
*modify_ttl = false;
@@ -57,6 +57,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
if (!dmn->ste_ctx) {
mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
return -EOPNOTSUPP;
}
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
@@ -221,6 +221,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
struct mlx5dr_ste_build *sb;
bool inner, rx;
@@ -259,80 +260,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
dmn, inner, rx);
mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
&mask, dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
&mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
&mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn))
mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) {
ret = mlx5dr_ste_build_icmp(&sb[idx++],
ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc))
mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
/* Inner */
@@ -343,50 +353,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = true;
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
&mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
inner, rx);
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
@@ -25,7 +26,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
if (!ste_info_last)
return -ENOMEM;
mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->ste_arr;
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -169,6 +172,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste *col_ste,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste *new_ste;
int ret;
@@ -180,11 +184,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
ret = dr_rule_append_to_miss_list(new_ste,
ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
mlx5dr_dbg(dmn, "Failed update dup entry\n");
goto err_exit;
}
@@ -224,6 +228,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *new_htbl,
struct list_head *update_list)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +242,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
/* Copy STE control and tag */
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
nic_matcher->e_anchor->chunk->icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +259,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_ste,
hw_ste);
if (!new_ste) {
mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
new_idx);
return NULL;
}
@@ -391,7 +397,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn,
new_htbl,
formatted_ste,
@@ -436,13 +443,15 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
* (48B length), since it works only on the first 32B
*/
mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
prev_htbl->ste_arr[0].hw_ste,
new_htbl->chunk->icm_addr,
new_htbl->chunk->num_of_entries);
ste_to_update = &prev_htbl->ste_arr[0];
} else {
mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
cur_htbl->pointing_ste->hw_ste,
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
@@ -496,6 +505,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
@@ -507,8 +518,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
miss_list, send_list)) {
mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -659,6 +671,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
u8 num_of_builders = nic_matcher->num_of_builders;
struct mlx5dr_matcher *matcher = rule->matcher;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
@@ -692,10 +705,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
goto err_exit;
/* Point current ste to the new action */
mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
prev_hw_ste,
action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +737,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
/* Take ref on table, only on first time this ste is used */
@@ -730,7 +746,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
nic_matcher->e_anchor->chunk->icm_addr);
ste->ste_chain_location = ste_location;
@@ -743,7 +760,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste,
hw_ste,
DR_CHUNK_SIZE_1)) {
mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
mlx5dr_dbg(dmn, "Failed allocating table\n");
goto clean_ste_info;
}
@@ -3,104 +3,7 @@
#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_types.h"
#define DR_STE_CRC_POLY 0xEDB88320L
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
/* Set to STE a specific value using DR_STE_SET */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
if ((spec)->s_fname) { \
MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
(spec)->s_fname = 0; \
} \
} while (0)
/* Set to STE spec->s_fname to tag->t_fname */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)
#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
in_out##_first_mpls_label);\
DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
in_out##_first_mpls_s_bos); \
DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
in_out##_first_mpls_exp); \
DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
in_out##_first_mpls_ttl); \
} while (0)
#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
in_out##_first_mpls_label);\
DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
in_out##_first_mpls_s_bos); \
DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
in_out##_first_mpls_exp); \
DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
in_out##_first_mpls_ttl); \
} while (0)
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc)->outer_first_mpls_over_gre_label || \
(_misc)->outer_first_mpls_over_gre_exp || \
(_misc)->outer_first_mpls_over_gre_s_bos || \
(_misc)->outer_first_mpls_over_gre_ttl)
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
(_misc)->outer_first_mpls_over_udp_label || \
(_misc)->outer_first_mpls_over_udp_exp || \
(_misc)->outer_first_mpls_over_udp_s_bos || \
(_misc)->outer_first_mpls_over_udp_ttl)
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
(rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
MLX5DR_STE_LU_TYPE_##lookup_type##_O)
enum dr_ste_tunl_action {
DR_STE_TUNL_ACTION_NONE = 0,
DR_STE_TUNL_ACTION_ENABLE = 1,
DR_STE_TUNL_ACTION_DECAP = 2,
DR_STE_TUNL_ACTION_L3_DECAP = 3,
DR_STE_TUNL_ACTION_POP_VLAN = 4,
};
enum dr_ste_action_type {
DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
DR_STE_ACTION_TYPE_ENCAP = 4,
};
#include "dr_ste.h"
struct dr_hw_ste_format {
u8 ctrl[DR_STE_SIZE_CTRL];
@@ -142,7 +45,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
return index;
}
static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
u16 byte_mask = 0;
int i;
@@ -155,7 +58,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -169,104 +72,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
/* This can be used for both rx_steering_mult and for sx_transmit */
MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
bool go_back)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
DR_STE_ACTION_TYPE_PUSH_VLAN);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
/* Due to a HW limitation we need to set this bit, otherwise reformat +
* push vlan will not work.
*/
if (go_back)
mlx5dr_ste_set_go_back_bit(hw_ste_p);
}
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
/* The hardware expects the size here in words (2 bytes) */
MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_DECAP);
}
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_POP_VLAN);
}
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_L3_DECAP);
MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
{
return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
u32 re_write_index)
{
MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
num_of_actions);
MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
re_write_index);
}
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
u16 gvmi)
{
MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
/* Set GVMI once, this is the same for RX/TX
* bits 63_48 of next table base / miss address encode the next GVMI
*/
MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
@@ -279,21 +84,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u64 miss_addr)
{
u64 index =
(MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
return index << 6;
ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *ste, u64 miss_addr)
{
u64 index = (icm_addr >> 5) | ht_size;
u8 *hw_ste_p = ste->hw_ste;
MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
@@ -317,15 +127,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
return &ste->htbl->miss_list[index];
}
static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
u8 *hw_ste = ste->hw_ste;
MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
@@ -363,7 +174,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *ste,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
@@ -380,7 +192,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
*/
memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -436,7 +248,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Free ste that is located in the middle of the miss list:
* |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
*/
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *ste,
struct mlx5dr_ste_send_info *ste_info,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
@@ -448,8 +261,8 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
if (WARN_ON(!prev_ste))
return;
miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
prev_ste->hw_ste, ste_info,
@@ -467,6 +280,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
{
struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info ste_info_head;
struct mlx5dr_ste *next_ste, *first_ste;
bool put_on_origin_table = true;
@@ -495,7 +309,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
if (!next_ste) {
/* One and only entry in the list */
dr_ste_remove_head_ste(ste, nic_matcher,
dr_ste_remove_head_ste(ste_ctx, ste,
nic_matcher,
&ste_info_head,
&send_ste_list,
stats_tbl);
@@ -506,7 +321,9 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
put_on_origin_table = false;
}
} else { /* Ste in the middle of the list */
dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
dr_ste_remove_middle_ste(ste_ctx, ste,
&ste_info_head, &send_ste_list,
stats_tbl);
}
/* Update HW */
@@ -530,34 +347,18 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst)
return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
/* The miss address for TX and RX STEs is located at the same offsets */
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
{
u8 *hw_ste = ste->hw_ste;
MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -565,13 +366,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi,
{
struct mlx5dr_ste ste = {};
mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
else
mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -582,7 +383,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
{
u8 formatted_ste[DR_STE_SIZE] = {};
mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn,
htbl,
formatted_ste,
@@ -597,18 +399,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
u8 *cur_hw_ste,
enum mlx5dr_icm_chunk_size log_table_size)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
u8 next_lu_type;
u16 next_lu_type;
u16 byte_mask;
next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
@@ -628,7 +430,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
goto free_table;
}
mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
cur_hw_ste, next_htbl);
ste->next_htbl = next_htbl;
next_htbl->pointing_ste = ste;
}
@@ -657,7 +460,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
u8 lu_type, u16 byte_mask)
u16 lu_type, u16 byte_mask)
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
@@ -709,6 +512,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
return 0;
}
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
attr, added_stes);
}
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
attr, added_stes);
}
const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
{
const struct mlx5dr_ste_action_modify_field *hw_field;
if (sw_field >= ste_ctx->modify_field_arr_sz)
return NULL;
hw_field = &ste_ctx->modify_field_arr[sw_field];
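/* Holes in the sparse conversion array are zero-initialized,
* so an entry with start == end == 0 marks a SW field that has
* no HW equivalent in this STE format.
*/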
if (!hw_field->end && !hw_field->start)
return NULL;
return hw_field;
}
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
ste_ctx->set_action_set((u8 *)hw_action,
hw_field, shifter, length, data);
}
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
ste_ctx->set_action_add((u8 *)hw_action,
hw_field, shifter, length, data);
}
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 dst_hw_field,
u8 dst_shifter,
u8 dst_len,
u8 src_hw_field,
u8 src_shifter)
{
ste_ctx->set_action_copy((u8 *)hw_action,
dst_hw_field, dst_shifter, dst_len,
src_hw_field, src_shifter);
}
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
void *data, u32 data_sz,
u8 *hw_action, u32 hw_action_sz,
u16 *used_hw_action_num)
{
/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
return -EINVAL;
return ste_ctx->set_action_decap_l3_list(data, data_sz,
hw_action, hw_action_sz,
used_hw_action_num);
}
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
@@ -738,6 +627,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
int ret, i;
@@ -748,14 +638,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
sb = nic_matcher->ste_builder;
for (i = 0; i < nic_matcher->num_of_builders; i++) {
mlx5dr_ste_init(ste_arr,
sb->lu_type,
nic_dmn->ste_type,
dmn->info.caps.gvmi);
ste_ctx->ste_init(ste_arr,
sb->lu_type,
nic_dmn->ste_type,
dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -765,45 +655,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
* not relevant for the last ste in the chain.
*/
sb++;
MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
}
ste_arr += DR_STE_SIZE;
}
return 0;
}
static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
if (mask->smac_47_16 || mask->smac_15_0) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
mask->smac_47_16 >> 16);
MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
mask->smac_47_16 << 16 | mask->smac_15_0);
mask->smac_47_16 = 0;
mask->smac_15_0 = 0;
}
DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
if (mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
} else if (mask->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
}
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
@@ -1045,566 +904,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
}
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
if (spec->smac_47_16 || spec->smac_15_0) {
MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
spec->smac_47_16 >> 16);
MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
spec->smac_47_16 << 16 | spec->smac_15_0);
spec->smac_47_16 = 0;
spec->smac_15_0 = 0;
}
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
pr_info("Unsupported ip_version value\n");
return -EINVAL;
}
}
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
return 0;
}
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
}
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
}
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
return 0;
ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
}
static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
}
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
return 0;
ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
}
static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
bool inner,
u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
destination_address, mask, dst_ip_31_0);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
source_address, mask, src_ip_31_0);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
destination_port, mask, tcp_dport);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
destination_port, mask, udp_dport);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
source_port, mask, tcp_sport);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
source_port, mask, udp_sport);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
protocol, mask, ip_protocol);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
fragmented, mask, frag);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
dscp, mask, ip_dscp);
DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
ecn, mask, ip_ecn);
if (mask->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
mask->tcp_flags = 0;
}
}
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
spec->tcp_flags = 0;
}
return 0;
ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
}
static void
dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
if (inner) {
if (misc_mask->inner_second_cvlan_tag ||
misc_mask->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
misc_mask->inner_second_cvlan_tag = 0;
misc_mask->inner_second_svlan_tag = 0;
}
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_vlan_id, misc_mask, inner_second_vid);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_cfi, misc_mask, inner_second_cfi);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_priority, misc_mask, inner_second_prio);
} else {
if (misc_mask->outer_second_cvlan_tag ||
misc_mask->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
misc_mask->outer_second_cvlan_tag = 0;
misc_mask->outer_second_svlan_tag = 0;
}
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_vlan_id, misc_mask, outer_second_vid);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_cfi, misc_mask, outer_second_cfi);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
second_priority, misc_mask, outer_second_prio);
}
}
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
bool inner, u8 *tag)
{
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
pr_info("Unsupported ip_version value\n");
return -EINVAL;
}
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (inner) {
if (misc_spec->inner_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->inner_second_cvlan_tag = 0;
} else if (misc_spec->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->inner_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
} else {
if (misc_spec->outer_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->outer_second_cvlan_tag = 0;
} else if (misc_spec->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->outer_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
}
return 0;
}
static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
}
static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
ste_ctx->build_eth_l2_src_init(sb, mask);
}
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
}
static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl, bit_mask,
l2_tunneling_network_id, (misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
ste_ctx->build_eth_l2_dst_init(sb, mask);
}
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
(misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
return -EINVAL;
}
}
return 0;
}
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
}
static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
}
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
return 0;
ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
}
static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
if (mask->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
mask->tcp_flags = 0;
}
ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
spec->tcp_flags = 0;
}
return 0;
}
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
@@ -1622,653 +1008,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
if (inner)
DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
else
DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
}
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
if (sb->inner)
DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
else
DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
return 0;
}
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
}
static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
ste_ctx->build_mpls_init(sb, mask);
}
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
return 0;
}
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_gre_tag;
}
static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
misc_2_mask, outer_first_mpls_over_gre_label);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
misc_2_mask, outer_first_mpls_over_gre_exp);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
misc_2_mask, outer_first_mpls_over_gre_s_bos);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
misc_2_mask, outer_first_mpls_over_gre_ttl);
} else {
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
misc_2_mask, outer_first_mpls_over_udp_label);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
misc_2_mask, outer_first_mpls_over_udp_exp);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
misc_2_mask, outer_first_mpls_over_udp_s_bos);
DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
misc_2_mask, outer_first_mpls_over_udp_ttl);
}
}
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
misc_2_mask, outer_first_mpls_over_gre_label);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
misc_2_mask, outer_first_mpls_over_gre_exp);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
misc_2_mask, outer_first_mpls_over_gre_s_bos);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2_mask, outer_first_mpls_over_gre_ttl);
} else {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
misc_2_mask, outer_first_mpls_over_udp_label);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
misc_2_mask, outer_first_mpls_over_udp_exp);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
misc_2_mask, outer_first_mpls_over_udp_s_bos);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2_mask, outer_first_mpls_over_udp_ttl);
}
return 0;
ste_ctx->build_tnl_gre_init(sb, mask);
}
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
}
#define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16
#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
u8 *bit_mask)
{
bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
u32 icmp_header_data_mask;
u32 icmp_type_mask;
u32 icmp_code_mask;
int dw0_location;
int dw1_location;
if (is_ipv4_mask) {
icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
icmp_type_mask = misc_3_mask->icmpv4_type;
icmp_code_mask = misc_3_mask->icmpv4_code;
dw0_location = caps->flex_parser_id_icmp_dw0;
dw1_location = caps->flex_parser_id_icmp_dw1;
} else {
icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
icmp_type_mask = misc_3_mask->icmpv6_type;
icmp_code_mask = misc_3_mask->icmpv6_code;
dw0_location = caps->flex_parser_id_icmpv6_dw0;
dw1_location = caps->flex_parser_id_icmpv6_dw1;
}
switch (dw0_location) {
case 4:
if (icmp_type_mask) {
MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
(icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
if (is_ipv4_mask)
misc_3_mask->icmpv4_type = 0;
else
misc_3_mask->icmpv6_type = 0;
}
if (icmp_code_mask) {
u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
flex_parser_4);
MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
if (is_ipv4_mask)
misc_3_mask->icmpv4_code = 0;
else
misc_3_mask->icmpv6_code = 0;
}
break;
default:
return -EINVAL;
}
switch (dw1_location) {
case 5:
if (icmp_header_data_mask) {
MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
(icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
if (is_ipv4_mask)
misc_3_mask->icmpv4_header_data = 0;
else
misc_3_mask->icmpv6_header_data = 0;
}
break;
default:
return -EINVAL;
}
return 0;
ste_ctx->build_tnl_mpls_init(sb, mask);
}
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
u32 icmp_header_data;
int dw0_location;
int dw1_location;
u32 icmp_type;
u32 icmp_code;
bool is_ipv4;
is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
icmp_header_data = misc_3->icmpv4_header_data;
icmp_type = misc_3->icmpv4_type;
icmp_code = misc_3->icmpv4_code;
dw0_location = sb->caps->flex_parser_id_icmp_dw0;
dw1_location = sb->caps->flex_parser_id_icmp_dw1;
} else {
icmp_header_data = misc_3->icmpv6_header_data;
icmp_type = misc_3->icmpv6_type;
icmp_code = misc_3->icmpv6_code;
dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
}
switch (dw0_location) {
case 4:
if (icmp_type) {
MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
(icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
if (is_ipv4)
misc_3->icmpv4_type = 0;
else
misc_3->icmpv6_type = 0;
}
if (icmp_code) {
u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
flex_parser_4);
MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
if (is_ipv4)
misc_3->icmpv4_code = 0;
else
misc_3->icmpv6_code = 0;
}
break;
default:
return -EINVAL;
}
switch (dw1_location) {
case 5:
if (icmp_header_data) {
MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
(icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
if (is_ipv4)
misc_3->icmpv4_header_data = 0;
else
misc_3->icmpv6_header_data = 0;
}
break;
default:
return -EINVAL;
}
return 0;
}
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
int ret;
ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
if (ret)
return ret;
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
return 0;
}
static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
DR_STE_SET_MASK_V(general_purpose, bit_mask,
general_purpose_lookup_field, misc_2_mask,
metadata_reg_a);
}
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc_2_mask, metadata_reg_a);
return 0;
return ste_ctx->build_icmp_init(sb, mask);
}
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
}
static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
if (inner) {
DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
inner_tcp_seq_num);
DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
inner_tcp_ack_num);
} else {
DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
outer_tcp_seq_num);
DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
outer_tcp_ack_num);
}
ste_ctx->build_general_purpose_init(sb, mask);
}
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
} else {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
}
return 0;
}
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
ste_ctx->build_eth_l4_misc_init(sb, mask);
}
static void
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
outer_vxlan_gpe_flags,
misc_3_mask, outer_vxlan_gpe_flags);
DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
outer_vxlan_gpe_next_protocol,
misc_3_mask, outer_vxlan_gpe_next_protocol);
DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
outer_vxlan_gpe_vni,
misc_3_mask, outer_vxlan_gpe_vni);
}
static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
outer_vxlan_gpe_flags);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_next_protocol, misc3,
outer_vxlan_gpe_next_protocol);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_vni, misc3,
outer_vxlan_gpe_vni);
return 0;
}
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
static void
dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
geneve_protocol_type,
misc_mask, geneve_protocol_type);
DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
geneve_oam,
misc_mask, geneve_oam);
DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
geneve_opt_len,
misc_mask, geneve_opt_len);
DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
geneve_vni,
misc_mask, geneve_vni);
}
static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_oam, misc, geneve_oam);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_opt_len, misc, geneve_opt_len);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_vni, misc, geneve_vni);
return 0;
}
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
ste_ctx->build_tnl_geneve_init(sb, mask);
}
static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
misc_2_mask, metadata_reg_c_0);
DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
misc_2_mask, metadata_reg_c_1);
DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
misc_2_mask, metadata_reg_c_2);
DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
misc_2_mask, metadata_reg_c_3);
}
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
return 0;
}
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
}
static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
misc_2_mask, metadata_reg_c_4);
DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
misc_2_mask, metadata_reg_c_5);
DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
misc_2_mask, metadata_reg_c_6);
DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
misc_2_mask, metadata_reg_c_7);
ste_ctx->build_register_0_init(sb, mask);
}
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
return 0;
}
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
ste_ctx->build_register_1_init(sb, mask);
}
static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
misc_mask->source_eswitch_owner_vhca_id = 0;
}
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
caps = &dmn->info.caps;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
caps = &dmn->peer_dmn->info.caps;
else
return -EINVAL;
} else {
caps = &dmn->info.caps;
}
vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
if (!vport_cap)
return -EINVAL;
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (vport_cap->vport_gvmi && source_gvmi_set)
MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
misc->source_eswitch_owner_vhca_id = 0;
misc->source_port = 0;
return 0;
}
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
@@ -2276,12 +1119,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->dmn = dmn;
sb->inner = inner;
sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = NULL,
};
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
return NULL;
return mlx5dr_ste_ctx_arr[version];
}
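/* Init-time usage sketch (the caps field name sw_format_ver is an
 * assumption here, not shown in this hunk): pick the STE context once
 * per domain and fail cleanly on steering formats that have no backing
 * implementation yet - the ConnectX-6DX slot stays NULL until STE_v1
 * lands in the follow-up series.
 */
dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
if (!dmn->ste_ctx)
	return -EOPNOTSUPP;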
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
#ifndef _DR_STE_
#define _DR_STE_
#include "dr_types.h"
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
#define HDR_LEN_L2_MACS 0xC
#define HDR_LEN_L2_VLAN 0x4
#define HDR_LEN_L2_ETHER 0x2
#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
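/* Worked out (illustrative):
 *	HDR_LEN_L2        = 0xC (two MACs) + 0x2 (ethertype) = 14 bytes
 *	HDR_LEN_L2_W_VLAN = 14 + 0x4 (one VLAN tag)          = 18 bytes
 * These are exactly the two data_sz values accepted by
 * mlx5dr_ste_set_action_decap_l3_list().
 */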
/* Set a specific value to an STE field and mark the spec field as consumed */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
if ((spec)->s_fname) { \
MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
(spec)->s_fname = 0; \
} \
} while (0)
/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as used */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
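/* Illustrative expansion of
 * DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport):
 */
if (spec->tcp_dport) {
	MLX5_SET(ste_eth_l4, tag, dst_port, spec->tcp_dport);
	spec->tcp_dport = 0;	/* mark the SW field as consumed */
}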
/* Set tag->t_fname to all ones (-1) and mark spec->s_fname as used */
#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)
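/* Illustrative helper, not part of the driver: the spec->tcp_flags bit
 * layout consumed above follows the TCP header flag order
 * (bit 0 = FIN, 1 = SYN, 2 = RST, 3 = PSH, 4 = ACK, 5 = URG,
 *  6 = ECE, 7 = CWR, 8 = NS).
 */
static inline u16 dr_tcp_flags_example(bool fin, bool syn, bool ack)
{
	return (u16)fin << 0 | (u16)syn << 1 | (u16)ack << 4;
}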
#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
struct mlx5dr_match_misc2 *_mask = mask; \
u8 *_tag = tag; \
DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
in_out##_first_mpls_label);\
DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
in_out##_first_mpls_s_bos); \
DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
in_out##_first_mpls_exp); \
DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
in_out##_first_mpls_ttl); \
} while (0)
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc)->outer_first_mpls_over_gre_label || \
(_misc)->outer_first_mpls_over_gre_exp || \
(_misc)->outer_first_mpls_over_gre_s_bos || \
(_misc)->outer_first_mpls_over_gre_ttl)
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
(_misc)->outer_first_mpls_over_udp_label || \
(_misc)->outer_first_mpls_over_udp_exp || \
(_misc)->outer_first_mpls_over_udp_s_bos || \
(_misc)->outer_first_mpls_over_udp_ttl)
enum dr_ste_action_modify_type_l3 {
DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
};
enum dr_ste_action_modify_type_l4 {
DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
};
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
#define DR_STE_CTX_BUILDER(fname) \
((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
struct mlx5dr_match_param *mask))
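/* Illustrative expansion: inside struct mlx5dr_ste_ctx below,
 *	void DR_STE_CTX_BUILDER(mpls);
 * declares the function-pointer member
 *	void (*build_mpls_init)(struct mlx5dr_ste_build *sb,
 *				struct mlx5dr_match_param *mask);
 */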
struct mlx5dr_ste_ctx {
/* Builders */
void DR_STE_CTX_BUILDER(eth_l2_src_dst);
void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
void DR_STE_CTX_BUILDER(eth_l2_src);
void DR_STE_CTX_BUILDER(eth_l2_dst);
void DR_STE_CTX_BUILDER(eth_l2_tnl);
void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
void DR_STE_CTX_BUILDER(mpls);
void DR_STE_CTX_BUILDER(tnl_gre);
void DR_STE_CTX_BUILDER(tnl_mpls);
int DR_STE_CTX_BUILDER(icmp);
void DR_STE_CTX_BUILDER(general_purpose);
void DR_STE_CTX_BUILDER(eth_l4_misc);
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
/* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
u64 (*get_miss_addr)(u8 *hw_ste_p);
void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
u16 (*get_byte_mask)(u8 *hw_ste_p);
/* Actions */
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void (*set_actions_tx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
u32 modify_field_arr_sz;
const struct mlx5dr_ste_action_modify_field *modify_field_arr;
void (*set_action_set)(u8 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data);
void (*set_action_add)(u8 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data);
void (*set_action_copy)(u8 *hw_action,
u8 dst_hw_field,
u8 dst_shifter,
u8 dst_len,
u8 src_hw_field,
u8 src_shifter);
int (*set_action_decap_l3_list)(void *data,
u32 data_sz,
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num);
};
extern struct mlx5dr_ste_ctx ste_ctx_v0;
#endif /* _DR_STE_ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_ste.h"
#define SVLAN_ETHERTYPE 0x88a8
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
enum dr_ste_v0_action_tunl {
DR_STE_TUNL_ACTION_NONE = 0,
DR_STE_TUNL_ACTION_ENABLE = 1,
DR_STE_TUNL_ACTION_DECAP = 2,
DR_STE_TUNL_ACTION_L3_DECAP = 3,
DR_STE_TUNL_ACTION_POP_VLAN = 4,
};
enum dr_ste_v0_action_type {
DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
DR_STE_ACTION_TYPE_ENCAP = 4,
};
enum dr_ste_v0_action_mdfy_op {
DR_STE_ACTION_MDFY_OP_COPY = 0x1,
DR_STE_ACTION_MDFY_OP_SET = 0x2,
DR_STE_ACTION_MDFY_OP_ADD = 0x3,
};
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
(rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
DR_STE_V0_LU_TYPE_##lookup_type##_O)
enum {
DR_STE_V0_LU_TYPE_NOP = 0x00,
DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06,
DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07,
DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b,
DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08,
DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09,
DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c,
DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
DR_STE_V0_LU_TYPE_ETHL4_O = 0x13,
DR_STE_V0_LU_TYPE_ETHL4_I = 0x14,
DR_STE_V0_LU_TYPE_ETHL4_D = 0x21,
DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c,
DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d,
DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e,
DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15,
DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24,
DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25,
DR_STE_V0_LU_TYPE_GRE = 0x16,
DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22,
DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23,
DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
};
enum {
DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0,
DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1,
DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2,
DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3,
DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4,
DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5,
DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6,
DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7,
DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8,
DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9,
DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10,
DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11,
DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12,
DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13,
DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14,
DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15,
DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16,
DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17,
DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18,
DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19,
DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20,
DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21,
DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22,
DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
},
[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
},
[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
},
[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
},
};
static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
{
return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
/* The miss address for TX and RX STEs is located at the same offsets */
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
(MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32) << 26);
return index << 6;
}
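A standalone sketch (user-space C, not kernel code) of the packing that the set/get pair above performs: the 64-byte-aligned miss address is shifted down by 6, the low 26 bits land in miss_address_31_6 and the next 8 bits in miss_address_39_32, so the round trip is lossless for any aligned address that fits in 40 bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t miss_addr = 0x12345678c0ULL;		/* 64-byte aligned */
	uint64_t index = miss_addr >> 6;

	uint32_t miss_address_31_6 = index & 0x3ffffff;	/* low 26 bits */
	uint8_t miss_address_39_32 = index >> 26;	/* next 8 bits */

	uint64_t decoded = (((uint64_t)miss_address_39_32 << 26) |
			    miss_address_31_6) << 6;
	assert(decoded == miss_addr);
	return 0;
}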
static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
}
static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
{
return MLX5_GET(ste_general, hw_ste_p, byte_mask);
}
static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
}
static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
}
static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
{
return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
}
static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
u64 index = (icm_addr >> 5) | ht_size;
MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}
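The hit address packing is similar, with a twist: the next table's ICM address is aligned, so after the >>5 shift its low bits are zero and free to carry the hash table size code, which set_hit_addr ORs into the index before splitting it across the two next_table_base_*_size fields. A standalone sketch (values illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t icm_addr = 0x2ab5400ULL;	/* aligned: low bits zero after >>5 */
	uint64_t ht_size = 0x3;			/* size code rides in those bits */
	uint64_t index = (icm_addr >> 5) | ht_size;

	uint32_t base_31_5_size = index & 0x7ffffff;	/* low 27 bits */
	uint32_t base_39_32_size = index >> 27;		/* remaining bits */

	uint64_t rebuilt = ((uint64_t)base_39_32_size << 27) | base_31_5_size;
	assert(((rebuilt & ~0x3ULL) << 5) == icm_addr);	/* strip the size code */
	return 0;
}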
static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
u8 entry_type, u16 gvmi)
{
dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
/* Set the GVMI once, it is the same for RX/TX:
* bits 63_48 of the next table base / miss address encode the next GVMI
*/
MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
/* This can be used for both rx_steering_mult and sx_transmit */
MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
bool go_back)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
DR_STE_ACTION_TYPE_PUSH_VLAN);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
/* Due to a HW limitation we need to set this bit, otherwise reformat +
* push vlan will not work.
*/
if (go_back)
dr_ste_v0_set_go_back_bit(hw_ste_p);
}
static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
int size, bool encap_l3)
{
MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
/* The hardware expects the size here in 2-byte words */
MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
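Usage sketch (values illustrative; hw_ste and reformat_id are assumed to exist in the caller): a plain 14-byte L2 encap header lands in the STE as action_description = 7, since the field counts 2-byte words:

dr_ste_v0_set_tx_encap(hw_ste, reformat_id, 14 /* bytes */, false /* L2 */);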
static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_DECAP);
}
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_POP_VLAN);
}
static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_L3_DECAP);
MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
u32 re_write_index)
{
MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
num_of_actions);
MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
re_write_index);
}
static void dr_ste_v0_arr_init_next(u8 **last_ste,
u32 *added_stes,
enum mlx5dr_ste_entry_type entry_type,
u16 gvmi)
{
(*added_stes)++;
*last_ste += DR_STE_SIZE;
dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
entry_type, gvmi);
}
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
/* Make sure the modify header comes before L2 encapsulation,
* since modify headers are supported for outer headers only.
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
}
if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_push_vlan(last_ste,
attr->vlans.headers[i],
encap);
}
}
if (encap) {
/* Modify header and encapsulation require different STEs,
* since the modify header STE format doesn't support the
* encapsulation tunneling_action.
*/
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
action_type_set[DR_ACTION_TYP_PUSH_VLAN])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_TX,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
attr->reformat_id,
attr->reformat_size,
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
/* Whenever prio_tag_required is enabled, we can be sure that the
* previous table (ACL) already pushed a vlan onto our packet,
* and due to a HW limitation we need to set this bit, otherwise
* push vlan + reformat will not work.
*/
if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
dr_ste_v0_set_go_back_bit(last_ste);
}
if (action_type_set[DR_ACTION_TYP_CTR])
dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
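A worked example of the chaining rules above, for a hypothetical TX action set of modify header + two pushed VLANs + L2 encap:

/*
 * STE 0: modify-packet STE carrying the rewrite actions (modify header)
 * STE 1: push vlan[0] - new STE, since STE 0 already holds the rewrite;
 *                       go_back is set because an encap follows
 * STE 2: push vlan[1] - one additional STE per extra VLAN
 * STE 3: encap        - new STE, as a modify-packet STE has no
 *                       tunneling_action
 *
 * => *added_stes is incremented by 3, and the hit GVMI/address are
 *    programmed on STE 3.
 */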
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
if (action_type_set[DR_ACTION_TYP_CTR])
dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->decap_actions,
attr->decap_index);
}
if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
dr_ste_v0_set_rx_decap(last_ste);
if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
int i;
for (i = 0; i < attr->vlans.count; i++) {
if (i ||
action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_set_rx_pop_vlan(last_ste);
}
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_MODIFY_PKT,
attr->gvmi);
else
dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
attr->modify_index);
}
if (action_type_set[DR_ACTION_TYP_TAG]) {
if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
MLX5DR_STE_TYPE_RX,
attr->gvmi);
dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
}
dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static void dr_ste_v0_set_action_set(u8 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
length = (length == 32) ? 0 : length;
MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}
static void dr_ste_v0_set_action_add(u8 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data)
{
length = (length == 32) ? 0 : length;
MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}
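A sketch of building one SET action with the helper above, using the metadata REG_A entry from the conversion array (the buffer and data value are illustrative):

u8 hw_action[MLX5_ST_SZ_BYTES(dr_action_hw_set)] = {};

dr_ste_v0_set_action_set(hw_action,
			 DR_STE_V0_ACTION_MDFY_FLD_METADATA,
			 0,		/* shifter: REG_A starts at bit 0 */
			 32,		/* full field; the helper stores 32 as 0 */
			 0x12345678);	/* inline data to write */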
static void dr_ste_v0_set_action_copy(u8 *hw_action,
u8 dst_hw_field,
u8 dst_shifter,
u8 dst_len,
u8 src_hw_field,
u8 src_shifter)
{
MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
}
#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
static int
dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
u8 *hw_action, u32 hw_action_sz,
u16 *used_hw_action_num)
{
struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
u32 hw_action_num;
int required_actions;
u32 hdr_fld_4b;
u16 hdr_fld_2b;
u16 vlan_type;
bool vlan;
vlan = (data_sz != HDR_LEN_L2);
hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
if (hw_action_num < required_actions)
return -ENOMEM;
/* dmac_47_16 */
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action,
destination_length, 0);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
MLX5_SET(dr_action_hw_set, hw_action,
destination_left_shifter, 16);
hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
MLX5_SET(dr_action_hw_set, hw_action,
inline_data, hdr_fld_4b);
hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
/* smac_47_16 */
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action,
destination_length, 0);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
/* dmac_15_0 */
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action,
destination_length, 16);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
MLX5_SET(dr_action_hw_set, hw_action,
destination_left_shifter, 0);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
MLX5_SET(dr_action_hw_set, hw_action,
inline_data, hdr_fld_2b);
hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
/* ethertype + (optional) vlan */
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
MLX5_SET(dr_action_hw_set, hw_action,
destination_left_shifter, 32);
if (!vlan) {
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
} else {
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
}
hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
/* smac_15_0 */
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
MLX5_SET(dr_action_hw_set, hw_action,
destination_length, 16);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
MLX5_SET(dr_action_hw_set, hw_action,
destination_left_shifter, 0);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
if (vlan) {
MLX5_SET(dr_action_hw_set, hw_action,
opcode, DR_STE_ACTION_MDFY_OP_SET);
hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
MLX5_SET(dr_action_hw_set, hw_action,
inline_data, hdr_fld_2b);
MLX5_SET(dr_action_hw_set, hw_action,
destination_length, 16);
MLX5_SET(dr_action_hw_set, hw_action,
destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
MLX5_SET(dr_action_hw_set, hw_action,
destination_left_shifter, 0);
}
*used_hw_action_num = required_actions;
return 0;
}
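Caller-side sizing sketch (l2_hdr_data and l2_hdr_size are assumed inputs): the rebuilt L2 header costs DR_STE_DECAP_L3_MIN_ACTION_NUM (5) hw_set actions, plus one when it carries a VLAN, each MLX5_ST_SZ_BYTES(dr_action_hw_set) bytes long:

u8 hw_actions[6 * MLX5_ST_SZ_BYTES(dr_action_hw_set)] = {};	/* worst case */
u16 used_actions;
int ret;

ret = dr_ste_v0_set_action_decap_l3_list(l2_hdr_data, l2_hdr_size,
					  hw_actions, sizeof(hw_actions),
					  &used_actions);
/* -ENOMEM only if the buffer can't hold the 5 (no VLAN) or 6 (VLAN) actions */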
static void
dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
if (mask->smac_47_16 || mask->smac_15_0) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
mask->smac_47_16 >> 16);
MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
mask->smac_47_16 << 16 | mask->smac_15_0);
mask->smac_47_16 = 0;
mask->smac_15_0 = 0;
}
DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
if (mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
} else if (mask->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
}
static int
dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
if (spec->smac_47_16 || spec->smac_15_0) {
MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
spec->smac_47_16 >> 16);
MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
spec->smac_47_16 << 16 | spec->smac_15_0);
spec->smac_47_16 = 0;
spec->smac_15_0 = 0;
}
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
return -EINVAL;
}
}
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
return 0;
}
static void
dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
}
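Every *_init builder below follows this same pattern: run the tag function once against the mask to fill sb->bit_mask, derive lu_type and byte_mask from it, and cache the tag function for per-rule use. A sketch of that later per-rule call (rule_match_value is an assumed input; the real call site is the common code behind mlx5dr_ste_build_ste_arr):

u8 tag[DR_STE_SIZE_MASK] = {};	/* per-rule tag area, size illustrative */
int ret;

ret = sb->ste_build_tag_func(rule_match_value, sb, tag);
if (ret)	/* e.g. -EINVAL for an unsupported ip_version value */
	return ret;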
static int
dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
return 0;
}
static void
dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
}
static int
dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
return 0;
}
static void
dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
}
static int
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
spec->tcp_flags = 0;
}
return 0;
}
static void
dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
}
static void
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
if (inner) {
if (misc_mask->inner_second_cvlan_tag ||
misc_mask->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
misc_mask->inner_second_cvlan_tag = 0;
misc_mask->inner_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_vlan_id, misc_mask, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_cfi, misc_mask, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_priority, misc_mask, inner_second_prio);
} else {
if (misc_mask->outer_second_cvlan_tag ||
misc_mask->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
misc_mask->outer_second_cvlan_tag = 0;
misc_mask->outer_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_vlan_id, misc_mask, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_cfi, misc_mask, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src, bit_mask,
second_priority, misc_mask, outer_second_prio);
}
}
static int
dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
bool inner, u8 *tag)
{
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
return -EINVAL;
}
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (inner) {
if (misc_spec->inner_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->inner_second_cvlan_tag = 0;
} else if (misc_spec->inner_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->inner_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
} else {
if (misc_spec->outer_second_cvlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
misc_spec->outer_second_cvlan_tag = 0;
} else if (misc_spec->outer_second_svlan_tag) {
MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
misc_spec->outer_second_svlan_tag = 0;
}
DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
}
return 0;
}
static void
dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
static int
dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
static void
dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
}
static void
dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
}
static int
dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
static void
dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
}
static void
dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl, bit_mask,
l2_tunneling_network_id, (misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (mask->svlan_tag || mask->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
mask->cvlan_tag = 0;
mask->svlan_tag = 0;
}
}
static int
dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
if (misc->vxlan_vni) {
MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
(misc->vxlan_vni << 8));
misc->vxlan_vni = 0;
}
if (spec->cvlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
spec->cvlan_tag = 0;
} else if (spec->svlan_tag) {
MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
spec->svlan_tag = 0;
}
if (spec->ip_version) {
if (spec->ip_version == IP_VERSION_IPV4) {
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
spec->ip_version = 0;
} else if (spec->ip_version == IP_VERSION_IPV6) {
MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
spec->ip_version = 0;
} else {
return -EINVAL;
}
}
return 0;
}
static void
dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
}
static int
dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
return 0;
}
static void
dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
}
static int
dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
if (spec->tcp_flags) {
DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
spec->tcp_flags = 0;
}
return 0;
}
static void
dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
}
static int
dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
if (sb->inner)
DR_STE_SET_MPLS(mpls, misc2, inner, tag);
else
DR_STE_SET_MPLS(mpls, misc2, outer, tag);
return 0;
}
static void
dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
}
static int
dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
return 0;
}
static void
dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
}
static int
dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
misc_2, outer_first_mpls_over_gre_label);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
misc_2, outer_first_mpls_over_gre_exp);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
misc_2, outer_first_mpls_over_gre_s_bos);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2, outer_first_mpls_over_gre_ttl);
} else {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
misc_2, outer_first_mpls_over_udp_label);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
misc_2, outer_first_mpls_over_udp_exp);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
misc_2, outer_first_mpls_over_udp_s_bos);
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2, outer_first_mpls_over_udp_ttl);
}
return 0;
}
static void
dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
}
#define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16
static int
dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
u32 *icmp_header_data;
int dw0_location;
int dw1_location;
u8 *icmp_type;
u8 *icmp_code;
bool is_ipv4;
is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
icmp_header_data = &misc_3->icmpv4_header_data;
icmp_type = &misc_3->icmpv4_type;
icmp_code = &misc_3->icmpv4_code;
dw0_location = sb->caps->flex_parser_id_icmp_dw0;
dw1_location = sb->caps->flex_parser_id_icmp_dw1;
} else {
icmp_header_data = &misc_3->icmpv6_header_data;
icmp_type = &misc_3->icmpv6_type;
icmp_code = &misc_3->icmpv6_code;
dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
}
switch (dw0_location) {
case 4:
MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
(*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
(*icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
*icmp_type = 0;
*icmp_code = 0;
break;
default:
return -EINVAL;
}
switch (dw1_location) {
case 5:
MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
*icmp_header_data);
*icmp_header_data = 0;
break;
default:
return -EINVAL;
}
return 0;
}
static int
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
int ret;
ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
if (ret)
return ret;
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
return 0;
}
static int
dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc_2, metadata_reg_a);
return 0;
}
static void
dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
}
static int
dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
} else {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
}
return 0;
}
static void
dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
}
static int
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
outer_vxlan_gpe_flags);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_next_protocol, misc3,
outer_vxlan_gpe_next_protocol);
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_vni, misc3,
outer_vxlan_gpe_vni);
return 0;
}
static void
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
}
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_oam, misc, geneve_oam);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_opt_len, misc, geneve_opt_len);
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_vni, misc, geneve_vni);
return 0;
}
static void
dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
}
static int
dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
return 0;
}
static void
dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
}
static int
dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
return 0;
}
static void
dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
}
static void
dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
misc_mask->source_eswitch_owner_vhca_id = 0;
}
static int
dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
caps = &dmn->info.caps;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
caps = &dmn->peer_dmn->info.caps;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
caps = &dmn->info.caps;
}
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (source_gvmi_set) {
vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
misc->source_port);
return -EINVAL;
}
if (vport_cap->vport_gvmi)
MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
misc->source_port = 0;
}
return 0;
}
static void
dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
.build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
.build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
.build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init,
.build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init,
.build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init,
.build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
.build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
.build_mpls_init = &dr_ste_v0_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
.build_icmp_init = &dr_ste_v0_build_icmp_init,
.build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
.build_register_0_init = &dr_ste_v0_build_register_0_init,
.build_register_1_init = &dr_ste_v0_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
/* Getters and Setters */
.ste_init = &dr_ste_v0_init,
.set_next_lu_type = &dr_ste_v0_set_next_lu_type,
.get_next_lu_type = &dr_ste_v0_get_next_lu_type,
.set_miss_addr = &dr_ste_v0_set_miss_addr,
.get_miss_addr = &dr_ste_v0_get_miss_addr,
.set_hit_addr = &dr_ste_v0_set_hit_addr,
.set_byte_mask = &dr_ste_v0_set_byte_mask,
.get_byte_mask = &dr_ste_v0_get_byte_mask,
/* Actions */
.set_actions_rx = &dr_ste_v0_set_actions_rx,
.set_actions_tx = &dr_ste_v0_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
.modify_field_arr = dr_ste_v0_action_modify_field_arr,
.set_action_set = &dr_ste_v0_set_action_set,
.set_action_add = &dr_ste_v0_set_action_add,
.set_action_copy = &dr_ste_v0_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
};
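With the ops table in place, the common dr_ste code is expected to reach v0 behavior only through the context. A sketch of one wrapper plus version selection (both bodies are assumptions based on the API declared below; the Connect-X6 STE_v1 context arrives in the next series):

void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
}

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version == 0)		/* Connect-X5 style STE format */
		return &ste_ctx_v0;

	return NULL;			/* STE_v1: next patch series */
}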
......@@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
u8 *hw_ste;
......@@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
};
struct mlx5dr_ste_htbl {
u8 lu_type;
u16 lu_type;
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
......@@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
u8 vhca_id_valid:1;
struct mlx5dr_domain *dmn;
struct mlx5dr_cmd_caps *caps;
u8 lu_type;
u16 lu_type;
u16 byte_mask;
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
......@@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
u8 lu_type, u16 byte_mask);
u16 lu_type, u16 byte_mask);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
......@@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
int size, bool encap_l3);
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
bool go_back);
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
u32 re_write_index);
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
#define MLX5DR_MAX_VLANS 2
struct mlx5dr_ste_actions_attr {
u32 modify_index;
u16 modify_actions;
u32 decap_index;
u16 decap_actions;
u8 decap_with_vlan:1;
u64 final_icm_addr;
u32 flow_tag;
u32 ctr_id;
u16 gvmi;
u16 hit_gvmi;
u32 reformat_id;
u32 reformat_size;
struct {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data);
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 hw_field,
u8 shifter,
u8 length,
u32 data);
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
__be64 *hw_action,
u8 dst_hw_field,
u8 dst_shifter,
u8 dst_len,
u8 src_hw_field,
u8 src_shifter);
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
void *data,
u32 data_sz,
u8 *hw_action,
u32 hw_action_sz,
u16 *used_hw_action_num);
const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher);
......@@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
return !ste->refcount;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
......@@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
......@@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
u32 outer_vxlan_gpe_next_protocol:8;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
u32 icmpv6_code:8;
u32 icmpv6_type:8;
u32 icmpv4_code:8;
u32 icmpv4_type:8;
u8 icmpv6_code;
u8 icmpv6_type;
u8 icmpv4_code;
u8 icmpv4_type;
u8 reserved_auto3[0x1c];
};
......@@ -671,6 +739,7 @@ struct mlx5dr_domain {
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct mlx5dr_domain_cache cache;
struct mlx5dr_ste_ctx *ste_ctx;
};
struct mlx5dr_table_rx_tx {
......@@ -725,6 +794,14 @@ struct mlx5dr_rule_member {
struct list_head use_ste_list;
};
struct mlx5dr_ste_action_modify_field {
u16 hw_field;
u8 start;
u8 end;
u8 l3_type;
u8 l4_type;
};
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
......@@ -1000,7 +1077,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_htbl_connect_info *connect_info,
bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
......
......@@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H
enum {
MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
};
enum {
MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
};
enum {
MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
};
enum {
MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
};
enum {
MLX5DR_STE_LU_TYPE_NOP = 0x00,
MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
MLX5DR_STE_LU_TYPE_GRE = 0x16,
MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
......