Commit 6a48faee authored by Maor Gottlieb, committed by Saeed Mahameed

net/mlx5: Add direct rule fs_cmd implementation

Add support for creating flow steering objects
via the direct rule API (SW steering).
A new layer, fs_dr, is added; it translates the commands that
fs_core sends to the FW into direct rule API calls. If a
feature is not supported by direct rule, -EOPNOTSUPP is
returned.
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent fb86f121
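For orientation before the hunks: the patch extends the mlx5_flow_cmds vtable (set_peer, create_ns, destroy_ns) and adds an fs_dr backend that implements it on top of mlx5dr, returning -EOPNOTSUPP where SW steering has no equivalent (as mlx5_cmd_dr_update_fte does below). The following standalone C sketch models only that dispatch pattern; the type and function names (flow_cmds, root_ns, dr_set_peer, ...) are simplified stand-ins for illustration, not the real mlx5 definitions.

/*
 * Simplified standalone model of the cmds-vtable dispatch used in this
 * patch; names here are illustrative stand-ins, not mlx5 code.
 */
#include <errno.h>
#include <stdio.h>

struct root_ns;

struct flow_cmds {			/* stands in for mlx5_flow_cmds */
	int (*set_peer)(struct root_ns *ns, struct root_ns *peer_ns);
	int (*update_fte)(struct root_ns *ns);
};

struct root_ns {			/* stands in for mlx5_flow_root_namespace */
	const struct flow_cmds *cmds;
};

static int dr_set_peer(struct root_ns *ns, struct root_ns *peer_ns)
{
	(void)ns; (void)peer_ns;	/* the real backend calls mlx5dr_domain_set_peer() */
	return 0;
}

static int dr_update_fte(struct root_ns *ns)
{
	(void)ns;
	return -EOPNOTSUPP;		/* unsupported by SW steering, as in the patch */
}

static const struct flow_cmds dr_cmds = {
	.set_peer   = dr_set_peer,
	.update_fte = dr_update_fte,
};

/* fs_core-style wrapper: callers never know which backend is behind ns->cmds. */
static int namespace_set_peer(struct root_ns *ns, struct root_ns *peer_ns)
{
	return ns->cmds->set_peer(ns, peer_ns);
}

int main(void)
{
	struct root_ns ns = { .cmds = &dr_cmds };
	struct root_ns peer = { .cmds = &dr_cmds };

	printf("set_peer   -> %d\n", namespace_set_peer(&ns, &peer));
	printf("update_fte -> %d\n", ns.cmds->update_fte(&ns));
	return 0;
}

In the real code, which command set a root namespace uses is decided elsewhere; the sketch only shows why adding set_peer/create_ns/destroy_ns to the vtable (plus the stubs in fs_cmd.c) lets fs_core stay backend-agnostic.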
@@ -73,4 +73,4 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_icm_pool.o steering/dr_crc32.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o
steering/dr_action.o steering/fs_dr.o
@@ -135,6 +135,22 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
{
}
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
return 0;
}
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
return 0;
}
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
return 0;
}
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -838,7 +854,10 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -854,10 +873,13 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
};
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
return &mlx5_flow_cmds;
}
@@ -93,6 +93,12 @@ struct mlx5_flow_cmds {
void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr);
int (*set_peer)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns);
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
@@ -108,5 +114,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
#endif
@@ -2991,3 +2991,9 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
return ns->cmds->set_peer(ns, peer_ns);
}
@@ -37,16 +37,23 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/fs_dr.h>
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
u32 id;
union {
struct mlx5_fs_dr_action action;
u32 id;
};
};
struct mlx5_pkt_reformat {
enum mlx5_flow_namespace_type ns_type;
int reformat_type; /* from mlx5_ifc */
u32 id;
union {
struct mlx5_fs_dr_action action;
u32 id;
};
};
/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
@@ -139,6 +146,7 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
struct mlx5_fs_dr_table fs_dr_table;
u32 id;
u16 vport;
unsigned int max_fte;
@@ -179,6 +187,7 @@ struct mlx5_ft_underlay_qp {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 index;
@@ -214,6 +223,7 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
struct mlx5_fs_dr_matcher fs_dr_matcher;
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -225,6 +235,7 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
struct mlx5_fs_dr_domain fs_dr_domain;
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -242,6 +253,11 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"
static bool mlx5_dr_is_fw_table(u32 flags)
{
if (flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
}
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect)
{
return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
disconnect);
}
static int set_miss_action(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_action *old_miss_action;
struct mlx5dr_action *action = NULL;
struct mlx5dr_table *next_tbl;
int err;
next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
if (next_tbl) {
action = mlx5dr_action_create_dest_table(next_tbl);
if (!action)
return -EINVAL;
}
old_miss_action = ft->fs_dr_table.miss_action;
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
if (err) {
action = NULL;
mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
err);
}
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
err = mlx5dr_action_destroy(old_miss_action);
if (err)
mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
err);
}
return err;
}
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
unsigned int log_size,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
int err;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
log_size,
next_ft);
tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
ft->level);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
}
ft->fs_dr_table.dr_table = tbl;
ft->id = mlx5dr_table_get_id(tbl);
if (next_ft) {
err = set_miss_action(ns, ft, next_ft);
if (err) {
mlx5dr_table_destroy(tbl);
ft->fs_dr_table.dr_table = NULL;
return err;
}
}
return 0;
}
static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
if (err) {
mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
err);
return err;
}
if (action) {
err = mlx5dr_action_destroy(action);
if (err) {
mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
err);
return err;
}
}
return err;
}
static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
return set_miss_action(ns, ft, next_ft);
}
static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg)
{
struct mlx5dr_matcher *matcher;
u16 priority = MLX5_GET(create_flow_group_in, in,
start_flow_index);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
in,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
mask.match_sz = sizeof(fg->mask.match_criteria);
matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
priority,
match_criteria_enable,
&mask);
if (!matcher) {
mlx5_core_err(ns->dev, "Failed creating matcher\n");
return -EINVAL;
}
fg->fs_dr_matcher.dr_matcher = matcher;
return 0;
}
static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
}
static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
dest_attr->vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID,
dest_attr->vport.vhca_id);
}
static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
struct mlx5_fs_vlan *vlan)
{
u16 n_ethtype = vlan->ethtype;
u8 prio = vlan->prio;
u16 vid = vlan->vid;
u32 vlan_hdr;
vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}
#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action *term_action = NULL;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
struct mlx5dr_action *tmp_action;
struct mlx5dr_action **actions;
bool delay_encap_set = false;
struct mlx5dr_rule *rule;
struct mlx5_flow_rule *dst;
int fs_dr_num_actions = 0;
int num_actions = 0;
size_t match_sz;
int err = 0;
int i;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
GFP_KERNEL);
if (!actions)
return -ENOMEM;
fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*fs_dr_actions), GFP_KERNEL);
if (!fs_dr_actions) {
kfree(actions);
return -ENOMEM;
}
match_sz = sizeof(fte->val);
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
* TX: push vlan -> modify header -> encap
* RX: decap -> pop vlan -> modify header
*/
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
tmp_action = mlx5dr_action_create_packet_reformat(domain,
decap_type, 0,
NULL);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap = fte->action.pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action;
else
delay_encap_set = true;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
actions[num_actions++] =
fte->action.modify_hdr->action.dr_action;
if (delay_encap_set)
actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action;
/* The order of the actions below is not important */
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
}
if (fte->flow_context.flow_tag) {
tmp_action =
mlx5dr_action_create_tag(fte->flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
id = dst->dest_attr.counter_id;
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
tmp_action = create_ft_action(dev, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
break;
default:
err = -EOPNOTSUPP;
goto free_actions;
}
}
}
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (term_action)
actions[num_actions++] = term_action;
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
num_actions,
actions);
if (!rule) {
err = -EINVAL;
goto free_actions;
}
kfree(actions);
fte->fs_dr_rule.dr_rule = rule;
fte->fs_dr_rule.num_actions = fs_dr_num_actions;
fte->fs_dr_rule.dr_actions = fs_dr_actions;
return 0;
free_actions:
for (i = 0; i < fs_dr_num_actions; i++)
if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
mlx5dr_action_destroy(fs_dr_actions[i]);
mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
kfree(actions);
kfree(fs_dr_actions);
return err;
}
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int reformat_type,
size_t size,
void *reformat_data,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action *action;
int dr_reformat;
switch (reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
break;
case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
break;
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
reformat_type);
return -EOPNOTSUPP;
}
action = mlx5dr_action_create_packet_reformat(dr_domain,
dr_reformat,
size,
reformat_data);
if (!action) {
mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
return -EINVAL;
}
pkt_reformat->action.dr_action = action;
return 0;
}
static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
mlx5dr_action_destroy(pkt_reformat->action.dr_action);
}
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
u8 namespace, u8 num_actions,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action *action;
size_t actions_sz;
actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
num_actions;
action = mlx5dr_action_create_modify_header(dr_domain, 0,
actions_sz,
modify_actions);
if (!action) {
mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
return -EINVAL;
}
modify_hdr->action.dr_action = action;
return 0;
}
static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
int err;
int i;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
if (err)
return err;
for (i = 0; i < rule->num_actions; i++)
if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
mlx5dr_action_destroy(rule->dr_actions[i]);
kfree(rule->dr_actions);
return 0;
}
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
struct mlx5dr_domain *peer_domain = NULL;
if (peer_ns)
peer_domain = peer_ns->fs_dr_domain.dr_domain;
mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
peer_domain);
return 0;
}
static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
{
ns->fs_dr_domain.dr_domain =
mlx5dr_domain_create(ns->dev,
MLX5DR_DOMAIN_TYPE_FDB);
if (!ns->fs_dr_domain.dr_domain) {
mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
return -EOPNOTSUPP;
}
return 0;
}
static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return mlx5dr_is_supported(dev);
}
static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.create_flow_table = mlx5_cmd_dr_create_flow_table,
.destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
.modify_flow_table = mlx5_cmd_dr_modify_flow_table,
.create_flow_group = mlx5_cmd_dr_create_flow_group,
.destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
.create_fte = mlx5_cmd_dr_create_fte,
.update_fte = mlx5_cmd_dr_update_fte,
.delete_fte = mlx5_cmd_dr_delete_fte,
.update_root_ft = mlx5_cmd_dr_update_root_ft,
.packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
{
return &mlx5_flow_cmds_dr;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
* Copyright (c) 2019 Mellanox Technologies
*/
#ifndef _MLX5_FS_DR_
#define _MLX5_FS_DR_
#include "mlx5dr.h"
struct mlx5_flow_root_namespace;
struct fs_fte;
struct mlx5_fs_dr_action {
struct mlx5dr_action *dr_action;
};
struct mlx5_fs_dr_ns {
struct mlx5_dr_ns *dr_ns;
};
struct mlx5_fs_dr_rule {
struct mlx5dr_rule *dr_rule;
/* Only actions created by fs_dr */
struct mlx5dr_action **dr_actions;
int num_actions;
};
struct mlx5_fs_dr_domain {
struct mlx5dr_domain *dr_domain;
};
struct mlx5_fs_dr_matcher {
struct mlx5dr_matcher *dr_matcher;
};
struct mlx5_fs_dr_table {
struct mlx5dr_table *dr_table;
struct mlx5dr_action *miss_action;
};
#ifdef CONFIG_MLX5_SW_STEERING
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
#else
static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
{
return NULL;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return false;
}
#endif /* CONFIG_MLX5_SW_STEERING */
#endif