Commit db471ed9 authored by David S. Miller

Merge tag 'mlx5-updates-2021-01-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-01-05

SW steering: refactor to have a device-specific STE layer below dr_ste

This series introduces improvements and refactoring by adding a new layer
below dr_ste to support different device STE formats.

It adds a struct of device-specific callbacks for the STE layer below dr_ste.
Each device implements its HW-specific functions, and the common DR code
accesses them through the new ste_ctx API.

The ConnectX-5 style steering format is called STE_v0.
The next patch series brings the ConnectX-6 style format, STE_v1.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents da2c3ee1 4781df92
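Before the diffs, a minimal, self-contained sketch of the callback-table pattern the series introduces (simplified names and types; the real table is struct mlx5dr_ste_ctx in dr_ste.h below, and the real wrappers live in dr_ste.c):

#include <stdint.h>

/* Trimmed stand-in for struct mlx5dr_ste_ctx: a per-device table of
 * STE-manipulation callbacks (illustrative, not the driver code). */
struct ste_ctx {
	void (*set_miss_addr)(uint8_t *hw_ste_p, uint64_t miss_addr);
};

/* STE_v0 (ConnectX-5 style format) implementation of one callback */
static void ste_v0_set_miss_addr(uint8_t *hw_ste_p, uint64_t miss_addr)
{
	/* the real code packs miss_addr into the STE control segment */
	(void)hw_ste_p;
	(void)miss_addr;
}

static struct ste_ctx ste_ctx_v0 = {
	.set_miss_addr = ste_v0_set_miss_addr,
};

/* Common-layer wrapper, analogous to mlx5dr_ste_set_miss_addr() in the
 * diffs below: callers pass the ctx, the wrapper dispatches through it. */
static void ste_set_miss_addr(struct ste_ctx *ctx,
			      uint8_t *hw_ste_p, uint64_t miss_addr)
{
	ctx->set_miss_addr(hw_ste_p, miss_addr);
}

The point of the indirection is that call sites compile once against the common wrappers, while the format-specific bit packing is chosen at runtime per device.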
@@ -83,5 +83,6 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
 					steering/dr_matcher.o steering/dr_rule.o \
 					steering/dr_icm_pool.o steering/dr_buddy.o \
 					steering/dr_ste.o steering/dr_send.o \
+					steering/dr_ste_v0.o \
 					steering/dr_cmd.o steering/dr_fw.o \
 					steering/dr_action.o steering/fs_dr.o
@@ -57,6 +57,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 {
 	int ret;
 
+	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+	if (!dmn->ste_ctx) {
+		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
 	if (ret) {
 		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
...
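A plausible shape for mlx5dr_ste_get_ctx(), whose implementation is in the collapsed diff further below; the version-to-context mapping here is an assumption based on this hunk and on the ste_ctx_v0 declaration in dr_ste.h:

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	/* STE_v0 covers the ConnectX-5 style format; an STE_v1 entry for
	 * the ConnectX-6 style format is expected in the follow-up series.
	 */
	if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
		return &ste_ctx_v0;
	return NULL;	/* caller above fails with -EOPNOTSUPP */
}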
@@ -221,6 +221,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 {
 	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
 	struct mlx5dr_match_param mask = {};
 	struct mlx5dr_ste_build *sb;
 	bool inner, rx;
@@ -259,80 +260,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		inner = false;
 
 		if (dr_mask_is_wqe_metadata_set(&mask.misc2))
-			mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+							 &mask, inner, rx);
 
 		if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
-			mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
-			mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
 		    (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
 		     dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
-			mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
-						      dmn, inner, rx);
+			mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+						      &mask, dmn, inner, rx);
 		}
 
 		if (dr_mask_is_smac_set(&mask.outer) &&
 		    dr_mask_is_dmac_set(&mask.outer)) {
-			mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
-							inner, rx);
+			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+							&mask, inner, rx);
 		}
 
 		if (dr_mask_is_smac_set(&mask.outer))
-			mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
-			mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (outer_ipv == DR_RULE_IPV6) {
 			if (dr_mask_is_dst_addr_set(&mask.outer))
-				mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
-								 inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+								 &mask, inner, rx);
 
 			if (dr_mask_is_src_addr_set(&mask.outer))
-				mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
-								 inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+								 &mask, inner, rx);
 
 			if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
-				mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
-								inner, rx);
+				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+								&mask, inner, rx);
 		} else {
 			if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
-				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
-								     inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+								     &mask, inner, rx);
 
 			if (dr_mask_is_ttl_set(&mask.outer))
-				mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
-								  inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+								  &mask, inner, rx);
 		}
 
 		if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
-			mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
-						       inner, rx);
+			mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+						       &mask, inner, rx);
 		else if (dr_mask_is_tnl_geneve(&mask, dmn))
-			mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
-						    inner, rx);
+			mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
-			mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+						     &mask, inner, rx);
 
 		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
-			mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+					      &mask, inner, rx);
 
 		if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
-			mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+						  &mask, inner, rx);
 
 		if (dr_mask_is_icmp(&mask, dmn)) {
-			ret = mlx5dr_ste_build_icmp(&sb[idx++],
+			ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
 						    &mask, &dmn->info.caps,
 						    inner, rx);
 			if (ret)
 				return ret;
 		}
 
 		if (dr_mask_is_tnl_gre_set(&mask.misc))
-			mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+						 &mask, inner, rx);
 	}
 
 	/* Inner */
@@ -343,50 +353,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		inner = true;
 
 		if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
-			mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (dr_mask_is_smac_set(&mask.inner) &&
 		    dr_mask_is_dmac_set(&mask.inner)) {
-			mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
+			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
 							&mask, inner, rx);
 		}
 
 		if (dr_mask_is_smac_set(&mask.inner))
-			mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);
 
 		if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
-			mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+						    &mask, inner, rx);
 
 		if (inner_ipv == DR_RULE_IPV6) {
 			if (dr_mask_is_dst_addr_set(&mask.inner))
-				mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
-								 inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+								 &mask, inner, rx);
 
 			if (dr_mask_is_src_addr_set(&mask.inner))
-				mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
-								 inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+								 &mask, inner, rx);
 
 			if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
-				mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
-								inner, rx);
+				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+								&mask, inner, rx);
 		} else {
 			if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
-				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
-								     inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+								     &mask, inner, rx);
 
 			if (dr_mask_is_ttl_set(&mask.inner))
-				mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
-								  inner, rx);
+				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+								  &mask, inner, rx);
 		}
 
 		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
-			mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+						     &mask, inner, rx);
 
 		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
-			mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+					      &mask, inner, rx);
 
 		if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
-			mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+			mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+						  &mask, inner, rx);
 	}
 
 	/* Empty matcher, takes all */
 	if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
...
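Every builder call above gained ste_ctx as its first argument. The wrappers themselves are presumably in the collapsed dr_ste.c diff further below; a likely shape, using the build_mpls_init member that DR_STE_CTX_BUILDER(mpls) generates in dr_ste.h:

void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	/* device-specific lu_type / bit-mask / tag setup */
	ste_ctx->build_mpls_init(sb, mask);
}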
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
 	struct list_head list;
 };
 
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+				       struct mlx5dr_ste *new_last_ste,
 				       struct list_head *miss_list,
 				       struct list_head *send_list)
 {
@@ -25,7 +26,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
 	if (!ste_info_last)
 		return -ENOMEM;
 
-	mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+	mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
 				 mlx5dr_ste_get_icm_addr(new_last_ste));
 	list_add_tail(&new_last_ste->miss_list_node, miss_list);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
 			      u8 *hw_ste)
 {
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
 	struct mlx5dr_ste_htbl *new_htbl;
 	struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
 	/* One and only entry, never grows */
 	ste = new_htbl->ste_arr;
-	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
+				 nic_matcher->e_anchor->chunk->icm_addr);
 	mlx5dr_htbl_get(new_htbl);
 
 	return ste;
@@ -169,6 +172,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
 				struct mlx5dr_ste *col_ste,
 				u8 *hw_ste)
 {
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	struct mlx5dr_ste *new_ste;
 	int ret;
@@ -180,11 +184,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
 	new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
 
 	/* Update the previous from the list */
-	ret = dr_rule_append_to_miss_list(new_ste,
+	ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
 					  mlx5dr_ste_get_miss_list(col_ste),
 					  update_list);
 	if (ret) {
-		mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+		mlx5dr_dbg(dmn, "Failed update dup entry\n");
 		goto err_exit;
 	}
@@ -224,6 +228,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 			struct mlx5dr_ste_htbl *new_htbl,
 			struct list_head *update_list)
 {
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	struct mlx5dr_ste_send_info *ste_info;
 	bool use_update_list = false;
 	u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +242,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 	/* Copy STE control and tag */
 	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
-	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+				 nic_matcher->e_anchor->chunk->icm_addr);
 
 	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
 	new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +259,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 						  new_ste,
 						  hw_ste);
 	if (!new_ste) {
-		mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+		mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
 			   new_idx);
 		return NULL;
 	}
@@ -391,7 +397,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
 	/* Write new table to HW */
 	info.type = CONNECT_MISS;
 	info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
-	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+				     dmn->info.caps.gvmi,
 				     nic_dmn,
 				     new_htbl,
 				     formatted_ste,
@@ -436,13 +443,15 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
 		/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
 		 * (48B len) which works only on first 32B
 		 */
-		mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
+					prev_htbl->ste_arr[0].hw_ste,
 					new_htbl->chunk->icm_addr,
 					new_htbl->chunk->num_of_entries);
 
 		ste_to_update = &prev_htbl->ste_arr[0];
 	} else {
-		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+						     cur_htbl->pointing_ste->hw_ste,
 						     new_htbl);
 		ste_to_update = cur_htbl->pointing_ste;
 	}
@@ -496,6 +505,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
 			 struct list_head *miss_list,
 			 struct list_head *send_list)
 {
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
 	struct mlx5dr_ste_send_info *ste_info;
 	struct mlx5dr_ste *new_ste;
@@ -507,8 +518,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
 	if (!new_ste)
 		goto free_send_info;
 
-	if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
-		mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+	if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
+					miss_list, send_list)) {
+		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
 		goto err_exit;
 	}
@@ -659,6 +671,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
 	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
 	u8 num_of_builders = nic_matcher->num_of_builders;
 	struct mlx5dr_matcher *matcher = rule->matcher;
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	u8 *curr_hw_ste, *prev_hw_ste;
 	struct mlx5dr_ste *action_ste;
 	int i, k, ret;
@@ -692,10 +705,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
 			goto err_exit;
 
 		/* Point current ste to the new action */
-		mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+						     prev_hw_ste,
+						     action_ste->htbl);
 		ret = dr_rule_add_member(nic_rule, action_ste);
 		if (ret) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+			mlx5dr_dbg(dmn, "Failed adding rule member\n");
 			goto free_ste_info;
 		}
 		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +737,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 				      struct list_head *miss_list,
 				      struct list_head *send_list)
 {
+	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	struct mlx5dr_ste_send_info *ste_info;
 
 	/* Take ref on table, only on first time this ste is used */
@@ -730,7 +746,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 	/* new entry -> new branch */
 	list_add_tail(&ste->miss_list_node, miss_list);
-	mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+				 nic_matcher->e_anchor->chunk->icm_addr);
 
 	ste->ste_chain_location = ste_location;
@@ -743,7 +760,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 					ste,
 					hw_ste,
 					DR_CHUNK_SIZE_1)) {
-		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+		mlx5dr_dbg(dmn, "Failed allocating table\n");
 		goto clean_ste_info;
 	}
...
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */

#ifndef _DR_STE_
#define _DR_STE_

#include "dr_types.h"

#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
#define HDR_LEN_L2_MACS 0xC
#define HDR_LEN_L2_VLAN 0x4
#define HDR_LEN_L2_ETHER 0x2
#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)

/* Write a specific value into the STE tag field if the corresponding
 * spec field is set, then clear the spec field to mark it as consumed.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
	if ((spec)->s_fname) { \
		MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
		(spec)->s_fname = 0; \
	} \
} while (0)

/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as consumed */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)

/* Set tag->t_fname to all-ones and mark spec->s_fname as consumed */
#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)

#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
	MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)

#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
	struct mlx5dr_match_misc2 *_mask = mask; \
	u8 *_tag = tag; \
	DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
		       in_out##_first_mpls_label);\
	DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
		       in_out##_first_mpls_s_bos); \
	DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
		       in_out##_first_mpls_exp); \
	DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
		       in_out##_first_mpls_ttl); \
} while (0)

#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)

#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)

enum dr_ste_action_modify_type_l3 {
	DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
	DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
	DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
};

enum dr_ste_action_modify_type_l4 {
	DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
	DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
	DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
};

u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);

/* Declares a builder-callback member named build_<fname>_init */
#define DR_STE_CTX_BUILDER(fname) \
	((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
				 struct mlx5dr_match_param *mask))

struct mlx5dr_ste_ctx {
	/* Builders */
	void DR_STE_CTX_BUILDER(eth_l2_src_dst);
	void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
	void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
	void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
	void DR_STE_CTX_BUILDER(eth_l2_src);
	void DR_STE_CTX_BUILDER(eth_l2_dst);
	void DR_STE_CTX_BUILDER(eth_l2_tnl);
	void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
	void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
	void DR_STE_CTX_BUILDER(mpls);
	void DR_STE_CTX_BUILDER(tnl_gre);
	void DR_STE_CTX_BUILDER(tnl_mpls);
	int DR_STE_CTX_BUILDER(icmp);
	void DR_STE_CTX_BUILDER(general_purpose);
	void DR_STE_CTX_BUILDER(eth_l4_misc);
	void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
	void DR_STE_CTX_BUILDER(tnl_geneve);
	void DR_STE_CTX_BUILDER(register_0);
	void DR_STE_CTX_BUILDER(register_1);
	void DR_STE_CTX_BUILDER(src_gvmi_qpn);

	/* Getters and Setters */
	void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
			 u8 entry_type, u16 gvmi);
	void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
	u16  (*get_next_lu_type)(u8 *hw_ste_p);
	void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
	u64  (*get_miss_addr)(u8 *hw_ste_p);
	void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
	void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
	u16  (*get_byte_mask)(u8 *hw_ste_p);

	/* Actions */
	void (*set_actions_rx)(struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes);
	void (*set_actions_tx)(struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes);
	u32 modify_field_arr_sz;
	const struct mlx5dr_ste_action_modify_field *modify_field_arr;
	void (*set_action_set)(u8 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data);
	void (*set_action_add)(u8 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data);
	void (*set_action_copy)(u8 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter);
	int (*set_action_decap_l3_list)(void *data,
					u32 data_sz,
					u8 *hw_action,
					u32 hw_action_sz,
					u16 *used_hw_action_num);
};

extern struct mlx5dr_ste_ctx ste_ctx_v0;

#endif /* _DR_STE_ */
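The DR_STE_SET_* macros above both write the tag and zero the consumed spec field; mask bits left non-zero after all builders run are what lets the driver detect an unsupported match. A tiny stand-alone demonstration of that consume-as-you-build convention (stubbed types and a stand-in for MLX5_SET; not driver code):

#include <stdio.h>

struct spec { unsigned int smac_47_16; };

/* stand-in for MLX5_SET(ste_eth_l2_src, tag, smac_47_16, value) */
#define MLX5_SET_STUB(tag, value) ((tag)->smac_47_16 = (value))

#define DR_STE_SET_TAG_STUB(tag, spec, s_fname) do {	\
	if ((spec)->s_fname) {				\
		MLX5_SET_STUB(tag, (spec)->s_fname);	\
		(spec)->s_fname = 0; /* mark consumed */\
	}						\
} while (0)

int main(void)
{
	struct spec mask = { .smac_47_16 = 0xffff };
	struct spec tag = { 0 };

	DR_STE_SET_TAG_STUB(&tag, &mask, smac_47_16);
	/* any bits still set in the mask were not consumed by a builder */
	printf("tag=0x%x leftover=0x%x\n", tag.smac_47_16, mask.smac_47_16);
	return 0;
}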
This diff is collapsed.
@@ -5,91 +5,6 @@
 #define MLX5_IFC_DR_H
 
 enum {
-	MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
-	MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
-	MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
-	MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
-	MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
-	MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
-	MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
-	MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
-	MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
-	MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
-	MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
-	MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
-	MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
-	MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
-	MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
-	MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
-	MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
-	MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
-	MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
-	MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
-	MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
-	MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
-	MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
-	MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
-};
-
-enum {
-	MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
-	MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
-	MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
-};
-
-enum {
-	MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
-	MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
-	MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
-};
-
-enum {
-	MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
-	MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
-	MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
-};
-
-enum {
-	MLX5DR_STE_LU_TYPE_NOP = 0x00,
-	MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
-	MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
-	MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
-	MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
-	MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
-	MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
-	MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
-	MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
-	MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
-	MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
-	MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
-	MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
-	MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
-	MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
-	MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
-	MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
-	MLX5DR_STE_LU_TYPE_GRE = 0x16,
-	MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
-	MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
-	MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
-	MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
-	MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
-	MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
 	MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
 };
...