Commit c232f81b authored by David S. Miller

Merge tag 'mlx5-updates-2021-03-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-03-12

1) TC support for ICMP parameters
2) TC connection tracking with mirroring
3) A round of trivial fixups and cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bfdfe7fc a3222a2d
@@ -263,15 +263,15 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
 	return 0;
 }
 
-static void dump_buf(void *buf, int size, int data_only, int offset)
+static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
 {
 	__be32 *p = buf;
 	int i;
 
 	for (i = 0; i < size; i += 16) {
-		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
-			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
-			 be32_to_cpu(p[3]));
+		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
+			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
+			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
 		p += 4;
 		offset += 16;
 	}
@@ -802,39 +802,41 @@ static void dump_command(struct mlx5_core_dev *dev,
 	int dump_len;
 	int i;
 
+	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
 	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
 
 	if (data_only)
 		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
-				   "dump command data %s(0x%x) %s\n",
-				   mlx5_command_str(op), op,
+				   "cmd[%d]: dump command data %s(0x%x) %s\n",
+				   ent->idx, mlx5_command_str(op), op,
 				   input ? "INPUT" : "OUTPUT");
 	else
-		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
-			      mlx5_command_str(op), op,
+		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
+			      ent->idx, mlx5_command_str(op), op,
 			      input ? "INPUT" : "OUTPUT");
 
 	if (data_only) {
 		if (input) {
-			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
 			offset += sizeof(ent->lay->in);
 		} else {
-			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
 			offset += sizeof(ent->lay->out);
 		}
 	} else {
-		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
 		offset += sizeof(*ent->lay);
 	}
 
 	for (i = 0; i < n && next; i++) {
 		if (data_only) {
 			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
-			dump_buf(next->buf, dump_len, 1, offset);
+			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
 			offset += MLX5_CMD_DATA_BLOCK_SIZE;
 		} else {
-			mlx5_core_dbg(dev, "command block:\n");
-			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
+			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
+			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
+				 ent->idx);
 			offset += sizeof(struct mlx5_cmd_prot_block);
 		}
 		next = next->next;
@@ -842,6 +844,8 @@ static void dump_command(struct mlx5_core_dev *dev,
 	if (data_only)
 		pr_debug("\n");
+
+	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
 }
 
 static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
...
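Note that dump_buf() emits through pr_debug(), so these dumps only appear once dynamic debug enables the call sites. The point of threading ent->idx through every line is that dumps from concurrent commands interleave in the kernel log; a minimal self-contained sketch of the tagging pattern (the function name here is illustrative, not the driver's):

#include <linux/types.h>
#include <linux/printk.h>

/* Illustrative sketch: prefix every line of a multi-line debug dump with
 * a stable per-command index so interleaved dumps stay attributable.
 */
static void sketch_dump_words(const u32 *buf, int nwords, int idx)
{
	int i;

	for (i = 0; i < nwords; i++)
		pr_debug("cmd[%d]: %03x: %08x\n", idx, i * 4, buf[i]);
}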
@@ -137,12 +137,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 		 * unregistering devlink instance while holding devlink_mutext.
 		 * Hence, do not support reload.
 		 */
-		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
 		return -EOPNOTSUPP;
 	}
 
 	if (mlx5_lag_is_active(dev)) {
-		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
 		return -EOPNOTSUPP;
 	}
...
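The dropped "\n" here is not cosmetic: extack strings are delivered verbatim to userspace, and tools such as iproute2 add their own line termination, so an embedded newline renders as a stray blank line. A minimal sketch of the convention (the callback name is hypothetical):

#include <linux/errno.h>
#include <linux/netlink.h>

/* Hypothetical callback: extack messages carry no trailing newline;
 * the userspace consumer terminates the line itself.
 */
static int sketch_reload_down(struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in this state");
	return -EOPNOTSUPP;
}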
@@ -1797,6 +1797,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	ct_flow->post_ct_attr->prio = 0;
 	ct_flow->post_ct_attr->ft = ct_priv->post_ct;
 
+	/* Splits were handled before CT */
+	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+		ct_flow->post_ct_attr->esw_attr->split_count = 0;
+
 	ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
 	ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
 	ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
...
@@ -669,6 +669,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 				  get_cqe_opcode(cqe));
 			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 					     (struct mlx5_err_cqe *)cqe);
+			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 				queue_work(cq->priv->wq, &sq->recover_work);
 			break;
...
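Dumping the failing WQE next to the error CQE gives the recovery worker both halves of the failure. Dump helpers of this kind typically bottom out in print_hex_dump(); a sketch of that primitive (names are illustrative, not the driver's):

#include <linux/printk.h>

/* Illustrative sketch: hex-dump a descriptor when an error CQE points
 * at it, pairing the failing WQE with the error report in the log.
 */
static void sketch_dump_wqe(const void *wqe, size_t len)
{
	print_hex_dump(KERN_WARNING, "wqe: ", DUMP_PREFIX_OFFSET,
		       16, 1, wqe, len, false);
}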
@@ -445,12 +445,16 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
 	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
 }
 
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 {
-	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+	u32 *indirection_rqt, rqn;
 	struct mlx5e_priv *priv = hp->func_priv;
 	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
 
+	indirection_rqt = kzalloc(sz, GFP_KERNEL);
+	if (!indirection_rqt)
+		return -ENOMEM;
+
 	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
 				      hp->num_channels);
@@ -462,6 +466,9 @@ static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 		rqn = hp->pair->rqn[ix];
 		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
 	}
+
+	kfree(indirection_rqt);
+	return 0;
 }
 
 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +489,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+	if (err)
+		goto out;
 
 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
 	if (!err)
 		hp->indir_rqt.enabled = true;
 
+out:
 	kvfree(in);
 	return err;
 }
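This is the standard stack-to-heap conversion: an MLX5E_INDIR_RQT_SIZE array of u32 is too large to keep on the kernel stack, so the function allocates it, gains an int return, and the caller now checks it. One subtlety worth flagging: the buffer must cover sz elements rather than sz bytes, which kcalloc() states explicitly. A sketch of that shape (the helper name is hypothetical):

#include <linux/slab.h>

/* Hypothetical helper: heap-allocate the indirection table sized per
 * element, propagate -ENOMEM, and free it on the way out.
 */
static int sketch_fill_rqt(void *rqtc, int sz)
{
	u32 *tbl = kcalloc(sz, sizeof(*tbl), GFP_KERNEL);

	if (!tbl)
		return -ENOMEM;

	/* ... build tbl and copy it into rqtc ... */

	kfree(tbl);
	return 0;
}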
@@ -1077,19 +1087,23 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 	if (flow_flag_test(flow, CT)) {
 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
 
-		return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
-					       flow, spec, attr,
-					       mod_hdr_acts);
+		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+					       flow, spec, attr,
+					       mod_hdr_acts);
+	} else {
+		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 	}
 
-	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 	if (IS_ERR(rule))
 		return rule;
 
 	if (attr->esw_attr->split_count) {
 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
 		if (IS_ERR(flow->rule[1])) {
-			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+			if (flow_flag_test(flow, CT))
+				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+			else
+				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 			return flow->rule[1];
 		}
 	}
@@ -1947,6 +1961,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				       misc_parameters);
 	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    misc_parameters);
+	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				      misc_parameters_3);
+	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				      misc_parameters_3);
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
@@ -1976,6 +1994,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	      BIT(FLOW_DISSECTOR_KEY_CT) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
 		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
@@ -2295,7 +2314,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		if (match.mask->flags)
 			*match_level = MLX5_MATCH_L4;
 	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+		struct flow_match_icmp match;
+
+		flow_rule_match_icmp(rule, &match);
+		switch (ip_proto) {
+		case IPPROTO_ICMP:
+			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+			      MLX5_FLEX_PROTO_ICMP))
+				return -EOPNOTSUPP;
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
+				 match.mask->type);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
+				 match.key->type);
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
+				 match.mask->code);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
+				 match.key->code);
+			break;
+		case IPPROTO_ICMPV6:
+			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+			      MLX5_FLEX_PROTO_ICMPV6))
+				return -EOPNOTSUPP;
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
+				 match.mask->type);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
+				 match.key->type);
+			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
+				 match.mask->code);
+			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
+				 match.key->code);
+			break;
+		default:
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Code and type matching only with ICMP and ICMPv6");
+			netdev_err(priv->netdev,
+				   "Code and type matching only with ICMP and ICMPv6\n");
+			return -EINVAL;
+		}
+		if (match.mask->code || match.mask->type) {
+			*match_level = MLX5_MATCH_L4;
+			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+		}
+	}
+
 	return 0;
 }
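The new hunk consumes the ICMP key through the kernel's core flow_offload API. For readers unfamiliar with it, a minimal driver-agnostic sketch of the access pattern (the function name is illustrative):

#include <net/flow_offload.h>

/* Illustrative sketch: test whether the filter uses the key at all,
 * then read key/mask pairs and program hardware only for fields whose
 * mask is non-zero.
 */
static int sketch_parse_icmp(struct flow_rule *rule)
{
	struct flow_match_icmp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP))
		return 0;	/* filter does not match on ICMP */

	flow_rule_match_icmp(rule, &match);

	if (match.mask->type) {
		/* program a HW match on match.key->type */
	}
	if (match.mask->code) {
		/* program a HW match on match.key->code */
	}
	return 0;
}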
@@ -2979,7 +3040,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	actions = flow->attr->action;
 
 	if (mlx5e_is_eswitch_flow(flow)) {
-		if (flow->attr->esw_attr->split_count && ct_flow) {
+		if (flow->attr->esw_attr->split_count && ct_flow &&
+		    !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
 			/* All registers used by ct are cleared when using
 			 * split rules.
 			 */
@@ -3779,6 +3841,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				return err;
 
 			flow_flag_set(flow, CT);
+			esw_attr->split_count = esw_attr->out_count;
 			break;
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
@@ -3841,11 +3904,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;
 		}
 
-		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-			NL_SET_ERR_MSG_MOD(extack,
-					   "Mirroring goto chain rules isn't supported");
-			return -EOPNOTSUPP;
-		}
 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	}
...
@@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
 	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-	prefetchw(wqe->data);
+	net_prefetchw(wqe->data);
 
 	*session = (struct mlx5e_tx_mpwqe) {
 		.wqe = wqe,
...
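The swap from prefetchw() to net_prefetchw() is about cacheline coverage: networking descriptor layouts assume at least 128 bytes get prefetched. As I recall, the helper in include/linux/netdevice.h is roughly the following (verify against your tree):

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/prefetch.h>

/* Paraphrased sketch: on architectures with cachelines smaller than
 * 128 bytes, prefetch a second line so the whole region is covered
 * for writing.
 */
static inline void sketch_net_prefetchw(void *p)
{
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
#endif
}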
@@ -248,7 +248,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 err_ethertype:
 	kfree(rule);
 out:
-	kfree(rule_spec);
+	kvfree(rule_spec);
 	return err;
 }
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
 	e->recirc_cnt = 0;
 
 out:
-	kfree(in);
+	kvfree(in);
 	return err;
 }
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
-		kfree(in);
+		kvfree(in);
 		return -ENOMEM;
 	}
@@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	}
 
 err_out:
-	kfree(spec);
-	kfree(in);
+	kvfree(spec);
+	kvfree(in);
 	return err;
 }
...
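These four hunks fix the same class of bug: rule_spec, spec, and in are allocated with kvzalloc(), which may fall back to vmalloc for large sizes, and a vmalloc address handed to kfree() corrupts memory. The pairing rule in a minimal sketch:

#include <linux/mm.h>
#include <linux/slab.h>

/* Minimal sketch of the kv* pairing rule: only kvfree() can release a
 * kvzalloc()/kvmalloc() buffer safely on every path.
 */
static int sketch_kv_pairing(size_t len)
{
	void *buf = kvzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	kvfree(buf);	/* kfree(buf) here would be the bug being fixed */
	return 0;
}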
@@ -768,7 +768,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	spin_lock(&lag_lock);
 	ldev = mlx5_lag_dev_get(dev);
-	if (ldev && __mlx5_lag_is_roce(ldev)) {
+	if (ldev && __mlx5_lag_is_active(ldev)) {
 		num_ports = MLX5_MAX_PORTS;
 		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
 		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
...
@@ -492,7 +492,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
 		break;
 	default:
 		break;
-	};
+	}
 
 	return 0;
 }
...
@@ -331,7 +331,7 @@ static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
 		 DR_STE_ACTION_TYPE_PUSH_VLAN);
 	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
-	/* Due to HW limitation we need to set this bit, otherwise reforamt +
+	/* Due to HW limitation we need to set this bit, otherwise reformat +
 	 * push vlan will not work.
 	 */
 	if (go_back)
...
@@ -437,21 +437,6 @@ static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
 	dr_ste_v1_set_reparse(hw_ste_p);
 }
 
-static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
-				      u8 *s_action,
-				      u16 decap_actions,
-				      u32 decap_index)
-{
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
-		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
-		 decap_actions);
-	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
-		 decap_index);
-
-	dr_ste_v1_set_reparse(hw_ste_p);
-}
-
 static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
 					  u8 *s_action,
 					  u16 num_of_actions,
@@ -571,9 +556,6 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
 	bool allow_ctr = true;
 
 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
-		dr_ste_v1_set_rx_decap_l3(last_ste, action,
-					  attr->decap_actions,
-					  attr->decap_index);
 		dr_ste_v1_set_rewrite_actions(last_ste, action,
 					      attr->decap_actions,
 					      attr->decap_index);
@@ -1532,6 +1514,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
 
+	misc_mask->source_eswitch_owner_vhca_id = 0;
 }
 
 static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
...
@@ -1142,6 +1142,8 @@ enum mlx5_flex_parser_protos {
 	MLX5_FLEX_PROTO_GENEVE      = 1 << 3,
 	MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
 	MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+	MLX5_FLEX_PROTO_ICMP        = 1 << 8,
+	MLX5_FLEX_PROTO_ICMPV6      = 1 << 9,
 };
 
 /* MLX5 DEV CAPs */
...
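The jump from 1 << 5 to 1 << 8 appears deliberate: these enum values presumably mirror the firmware's flex_parser_protocols capability bit positions rather than forming a dense sequence. Gating the new matching on them looks like this (a sketch, assuming the MLX5_CAP_GEN() accessor used throughout the driver):

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Sketch: test the flex-parser capability bit before offloading an
 * ICMP(v6) match; mdev is the usual struct mlx5_core_dev handle.
 */
static bool sketch_icmp_match_supported(struct mlx5_core_dev *mdev, bool v6)
{
	u32 protos = MLX5_CAP_GEN(mdev, flex_parser_protocols);

	return protos & (v6 ? MLX5_FLEX_PROTO_ICMPV6 : MLX5_FLEX_PROTO_ICMP);
}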