Commit af68ccbc authored by Jason Gunthorpe

Merge branch 'mlx5-flow-mutate' into rdma.git for-next

For dependencies, branch based on 'mellanox/mlx5-next' of
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux.git

Pull Flow actions to mutate packets from Leon Romanovsky:

====================
This series exposes the ability to create flow actions which can
mutate packet headers. We do that by exposing two new verbs:
 * modify header - can change existing packet headers.
 * reformat - can encapsulate or decapsulate a packet.
              Once created, a flow action must be attached to a steering
              rule for it to take effect (a minimal attach sketch follows).
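
For illustration only, a minimal kernel-side sketch of the attach step,
assuming a flow table 'ft', a populated 'spec' and 'dest', and action IDs
already returned by the alloc helpers exported in this series (error
handling trimmed):

	struct mlx5_flow_act flow_act = {
		.action      = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			       MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.modify_id   = modify_header_id,   /* from mlx5_modify_header_alloc() */
		.reformat_id = packet_reformat_id, /* from mlx5_packet_reformat_alloc() */
	};
	struct mlx5_flow_handle *rule;

	/* The rule carries the action IDs; the actions only take effect
	 * once the rule is installed.
	 */
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule))
		return PTR_ERR(rule);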

The first 10 patches refactor mlx5_core code, rename internal structures
to better reflect their operation and export needed functions so the RDMA
side can allocate the action.

The last 5 patches use the IOCTL infrastructure to expose mlx5_ib methods
which do the actual allocation of resources and return a handle to the
user. A user of this API is expected to know how to work with the device's
spec, as the input to these functions is HW dependent.

An example usage of the modify header action is routing: a user can create
an action which edits the L2 header and decreases the TTL.
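
As a hedged sketch (set_action_in field names are taken from mlx5_ifc.h,
not from this series; the TTL value and the namespace are illustrative),
such an action could be allocated with the newly exported helper roughly
as follows:

	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	u32 modify_header_id;
	int err;

	/* One PRM "set" action that overwrites the outgoing IP TTL. */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, length, 8);  /* TTL is an 8-bit field */
	MLX5_SET(set_action_in, action, data, 63);   /* example new TTL */

	err = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_BYPASS,
				       1, action, &modify_header_id);
	if (err)
		return err;
	/* ... attach to a rule via flow_act.modify_id, and later ... */
	mlx5_modify_header_dealloc(mdev, modify_header_id);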

An example usage of the packet reformat action is VXLAN encap/decap, which
is done by the HW.
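
A matching sketch for the encap side, using the alloc/dealloc helpers
exported by this series (the encap header buffer and its size are assumed
to be built by the caller, as the mlx5e tunnel code does):

	u32 packet_reformat_id;
	int err;

	err = mlx5_packet_reformat_alloc(mdev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					 encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &packet_reformat_id);
	if (err)
		return err;
	/* ... reference via flow_act.reformat_id together with
	 * MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, and later ...
	 */
	mlx5_packet_reformat_dealloc(mdev, packet_reformat_id);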
====================

* branch 'mlx5-flow-mutate':
  RDMA/mlx5: Extend packet reformat verbs
  RDMA/mlx5: Add new flow action verb - packet reformat
  RDMA/uverbs: Add generic function to fill in flow action object
  RDMA/mlx5: Add a new flow action verb - modify header
  RDMA/uverbs: Add UVERBS_ATTR_CONST_IN to the specs language
  net/mlx5: Export packet reformat alloc/dealloc functions
  net/mlx5: Pass a namespace for packet reformat ID allocation
  net/mlx5: Expose new packet reformat capabilities
  {net, RDMA}/mlx5: Rename encap to reformat packet
  net/mlx5: Move header encap type to IFC header file
  net/mlx5: Break encap/decap into two separated flow table creation flags
  net/mlx5: Add support for more namespaces when allocating modify header
  net/mlx5: Export modify header alloc/dealloc functions
  net/mlx5: Add proper NIC TX steering flow tables support
  net/mlx5: Cleanup flow namespace getter switch logic
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parents c6a21c38 a090d0d8
...@@ -611,3 +611,26 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, ...@@ -611,3 +611,26 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
return 0; return 0;
} }
EXPORT_SYMBOL(uverbs_copy_to); EXPORT_SYMBOL(uverbs_copy_to);
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val)
{
const struct uverbs_attr *attr;
attr = uverbs_attr_get(attrs_bundle, idx);
if (IS_ERR(attr)) {
if ((PTR_ERR(attr) != -ENOENT) || !def_val)
return PTR_ERR(attr);
*to = *def_val;
} else {
*to = attr->ptr_attr.data;
}
if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(_uverbs_get_const);
...@@ -326,11 +326,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( ...@@ -326,11 +326,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
if (IS_ERR(action)) if (IS_ERR(action))
return PTR_ERR(action); return PTR_ERR(action);
atomic_set(&action->usecnt, 0); uverbs_flow_action_fill_action(action, uobj, ib_dev,
action->device = ib_dev; IB_FLOW_ACTION_ESP);
action->type = IB_FLOW_ACTION_ESP;
action->uobject = uobj;
uobj->object = action;
return 0; return 0;
} }
......
...@@ -284,7 +284,7 @@ static bool devx_is_obj_create_cmd(const void *in) ...@@ -284,7 +284,7 @@ static bool devx_is_obj_create_cmd(const void *in)
case MLX5_CMD_OP_CREATE_FLOW_TABLE: case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
...@@ -627,9 +627,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din, ...@@ -627,9 +627,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
break; break;
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
break; break;
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
......
...@@ -7,7 +7,9 @@ ...@@ -7,7 +7,9 @@
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h> #include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h> #include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h> #include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
...@@ -16,6 +18,24 @@ ...@@ -16,6 +18,24 @@
#define UVERBS_MODULE_NAME mlx5_ib #define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h> #include <rdma/uverbs_named_ioctl.h>
static int
mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
enum mlx5_flow_namespace_type *namespace)
{
switch (table_type) {
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
*namespace = MLX5_FLOW_NAMESPACE_BYPASS;
break;
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
*namespace = MLX5_FLOW_NAMESPACE_EGRESS;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = { static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
[MLX5_IB_FLOW_TYPE_NORMAL] = { [MLX5_IB_FLOW_TYPE_NORMAL] = {
.type = UVERBS_ATTR_TYPE_PTR_IN, .type = UVERBS_ATTR_TYPE_PTR_IN,
...@@ -175,6 +195,248 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)( ...@@ -175,6 +195,248 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
return err; return err;
} }
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
switch (maction->flow_action_raw.sub_type) {
case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
maction->flow_action_raw.action_id);
break;
case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
maction->flow_action_raw.action_id);
break;
case MLX5_IB_FLOW_ACTION_DECAP:
break;
default:
break;
}
}
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
enum mlx5_ib_uapi_flow_table_type ft_type,
u8 num_actions, void *in)
{
enum mlx5_flow_namespace_type namespace;
struct mlx5_ib_flow_action *maction;
int ret;
ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
if (ret)
return ERR_PTR(-EINVAL);
maction = kzalloc(sizeof(*maction), GFP_KERNEL);
if (!maction)
return ERR_PTR(-ENOMEM);
ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
&maction->flow_action_raw.action_id);
if (ret) {
kfree(maction);
return ERR_PTR(ret);
}
maction->flow_action_raw.sub_type =
MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
maction->flow_action_raw.dev = dev;
return &maction->ib_action;
}
static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
max_modify_header_actions) ||
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(
attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
enum mlx5_ib_uapi_flow_table_type ft_type;
struct ib_flow_action *action;
size_t num_actions;
void *in;
int len;
int ret;
if (!mlx5_ib_modify_header_supported(mdev))
return -EOPNOTSUPP;
in = uverbs_attr_get_alloced_ptr(attrs,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
len = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
if (len % MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto))
return -EINVAL;
ret = uverbs_get_const(&ft_type, attrs,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
if (ret)
return ret;
num_actions = len / MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto),
action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
if (IS_ERR(action))
return PTR_ERR(action);
uverbs_flow_action_fill_action(action, uobj, uobj->context->device,
IB_FLOW_ACTION_UNSPECIFIED);
return 0;
}
static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
u8 packet_reformat_type,
u8 ft_type)
{
switch (packet_reformat_type) {
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
return MLX5_CAP_FLOWTABLE(ibdev->mdev,
encap_general_header);
break;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
reformat_l2_to_l3_tunnel);
break;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
reformat_l3_tunnel_to_l2);
break;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
break;
default:
break;
}
return false;
}
static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt)
{
switch (dv_prt) {
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
break;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
*prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
break;
case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
break;
default:
return -EINVAL;
}
return 0;
}
static int mlx5_ib_flow_action_create_packet_reformat_ctx(
struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_action *maction,
u8 ft_type, u8 dv_prt,
void *in, size_t len)
{
enum mlx5_flow_namespace_type namespace;
u8 prm_prt;
int ret;
ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
if (ret)
return ret;
ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
if (ret)
return ret;
ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
in, namespace,
&maction->flow_action_raw.action_id);
if (ret)
return ret;
maction->flow_action_raw.sub_type =
MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
maction->flow_action_raw.dev = dev;
return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
enum mlx5_ib_uapi_flow_table_type ft_type;
struct mlx5_ib_flow_action *maction;
int ret;
ret = uverbs_get_const(&ft_type, attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
if (ret)
return ret;
ret = uverbs_get_const(&dv_prt, attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
if (ret)
return ret;
if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
return -EOPNOTSUPP;
maction = kzalloc(sizeof(*maction), GFP_KERNEL);
if (!maction)
return -ENOMEM;
if (dv_prt ==
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
maction->flow_action_raw.sub_type =
MLX5_IB_FLOW_ACTION_DECAP;
maction->flow_action_raw.dev = mdev;
} else {
void *in;
int len;
in = uverbs_attr_get_alloced_ptr(attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
if (IS_ERR(in)) {
ret = PTR_ERR(in);
goto free_maction;
}
len = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
maction, ft_type, dv_prt, in, len);
if (ret)
goto free_maction;
}
uverbs_flow_action_fill_action(&maction->ib_action, uobj,
uobj->context->device,
IB_FLOW_ACTION_UNSPECIFIED);
return 0;
free_maction:
kfree(maction);
return ret;
}
DECLARE_UVERBS_NAMED_METHOD( DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_CREATE_FLOW, MLX5_IB_METHOD_CREATE_FLOW,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE, UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
...@@ -209,6 +471,44 @@ ADD_UVERBS_METHODS(mlx5_ib_fs, ...@@ -209,6 +471,44 @@ ADD_UVERBS_METHODS(mlx5_ib_fs,
&UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW), &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
&UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW)); &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_NEW,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
set_action_in_add_action_in_auto)),
UA_MANDATORY,
UA_ALLOC_AND_COPY),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
enum mlx5_ib_uapi_flow_table_type,
UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_ACCESS_NEW,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
UVERBS_ATTR_MIN_SIZE(1),
UA_ALLOC_AND_COPY,
UA_OPTIONAL),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
enum mlx5_ib_uapi_flow_action_packet_reformat_type,
UA_MANDATORY),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
enum mlx5_ib_uapi_flow_table_type,
UA_MANDATORY));
ADD_UVERBS_METHODS(
mlx5_ib_flow_actions,
UVERBS_OBJECT_FLOW_ACTION,
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
DECLARE_UVERBS_NAMED_METHOD( DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_MATCHER_CREATE, MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE, UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
...@@ -247,6 +547,7 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root) ...@@ -247,6 +547,7 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
root[i++] = &flow_objects; root[i++] = &flow_objects;
root[i++] = &mlx5_ib_fs; root[i++] = &mlx5_ib_fs;
root[i++] = &mlx5_ib_flow_actions;
return i; return i;
} }
...@@ -4003,6 +4003,9 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) ...@@ -4003,6 +4003,9 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
*/ */
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break; break;
case IB_FLOW_ACTION_UNSPECIFIED:
mlx5_ib_destroy_flow_action_raw(maction);
break;
default: default:
WARN_ON(true); WARN_ON(true);
break; break;
......
...@@ -153,6 +153,12 @@ struct mlx5_ib_pd { ...@@ -153,6 +153,12 @@ struct mlx5_ib_pd {
u32 pdn; u32 pdn;
}; };
enum {
MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1) #define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1) #define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0) #if (MLX5_IB_FLOW_LAST_PRIO <= 0)
...@@ -816,6 +822,11 @@ struct mlx5_ib_flow_action { ...@@ -816,6 +822,11 @@ struct mlx5_ib_flow_action {
u64 ib_flags; u64 ib_flags;
struct mlx5_accel_esp_xfrm *ctx; struct mlx5_accel_esp_xfrm *ctx;
} esp_aes_gcm; } esp_aes_gcm;
struct {
struct mlx5_ib_dev *dev;
u32 sub_type;
u32 action_id;
} flow_action_raw;
}; };
}; };
...@@ -862,7 +873,7 @@ to_mcounters(struct ib_counters *ibcntrs) ...@@ -862,7 +873,7 @@ to_mcounters(struct ib_counters *ibcntrs)
struct mlx5_ib_dev { struct mlx5_ib_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
const struct uverbs_object_tree_def *driver_trees[6]; const struct uverbs_object_tree_def *driver_trees[7];
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS]; struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports; int num_ports;
...@@ -1240,6 +1251,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add( ...@@ -1240,6 +1251,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
void *cmd_in, int inlen, int dest_id, int dest_type); void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type); bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root); int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else #else
static inline int static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev, mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
...@@ -1258,6 +1270,11 @@ mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root) ...@@ -1258,6 +1270,11 @@ mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{ {
return 0; return 0;
} }
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
return;
};
#endif #endif
static inline void init_query_mad(struct ib_smp *mad) static inline void init_query_mad(struct ib_smp *mad)
{ {
......
...@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP: case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
return MLX5_CMD_STAT_OK; return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP: case MLX5_CMD_OP_QUERY_HCA_CAP:
...@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_CREATE_QP: case MLX5_CMD_OP_FPGA_CREATE_QP:
case MLX5_CMD_OP_FPGA_MODIFY_QP: case MLX5_CMD_OP_FPGA_MODIFY_QP:
...@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, ...@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
case MLX5_CMD_OP_ALLOC_MEMIC:
*status = MLX5_DRIVER_STATUS_ABORTED; *status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND; *synd = MLX5_DRIVER_SYND;
return -EIO; return -EIO;
...@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command) ...@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
...@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command) ...@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
default: return "unknown command opcode"; default: return "unknown command opcode";
} }
} }
......
...@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg, ...@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\ {MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\
{MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\ {MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\
{MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ {MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, "REFORMAT"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\ {MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
......
...@@ -100,11 +100,6 @@ struct mlx5e_tc_flow_parse_attr { ...@@ -100,11 +100,6 @@ struct mlx5e_tc_flow_parse_attr {
int mirred_ifindex; int mirred_ifindex;
}; };
enum {
MLX5_HEADER_TYPE_VXLAN = 0x0,
MLX5_HEADER_TYPE_NVGRE = 0x1,
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
...@@ -686,7 +681,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, ...@@ -686,7 +681,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
.action = attr->action, .action = attr->action,
.has_flow_tag = true, .has_flow_tag = true,
.flow_tag = attr->flow_tag, .flow_tag = attr->flow_tag,
.encap_id = 0, .reformat_id = 0,
}; };
struct mlx5_fc *counter = NULL; struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
...@@ -834,7 +829,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, ...@@ -834,7 +829,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_priv *out_priv; struct mlx5e_priv *out_priv;
int err; int err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
out_dev = __dev_get_by_index(dev_net(priv->netdev), out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex); attr->parse_attr->mirred_ifindex);
err = mlx5e_attach_encap(priv, &parse_attr->tun_info, err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
...@@ -890,7 +885,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, ...@@ -890,7 +885,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err_mod_hdr: err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr); mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan: err_add_vlan:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
mlx5e_detach_encap(priv, flow); mlx5e_detach_encap(priv, flow);
err_attach_encap: err_attach_encap:
return rule; return rule;
...@@ -911,7 +906,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, ...@@ -911,7 +906,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr); mlx5_eswitch_del_vlan_action(esw, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
mlx5e_detach_encap(priv, flow); mlx5e_detach_encap(priv, flow);
kvfree(attr->parse_attr); kvfree(attr->parse_attr);
} }
...@@ -928,9 +923,10 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, ...@@ -928,9 +923,10 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow; struct mlx5e_tc_flow *flow;
int err; int err;
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
e->encap_size, e->encap_header, e->encap_size, e->encap_header,
&e->encap_id); MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
if (err) { if (err) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n", mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
err); err);
...@@ -984,7 +980,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, ...@@ -984,7 +980,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_VALID) { if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
e->flags &= ~MLX5_ENCAP_ENTRY_VALID; e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_encap_dealloc(priv->mdev, e->encap_id); mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
} }
} }
...@@ -1053,7 +1049,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, ...@@ -1053,7 +1049,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID) if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id); mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
hash_del_rcu(&e->encap_hlist); hash_del_rcu(&e->encap_hlist);
kfree(e->encap_header); kfree(e->encap_header);
...@@ -2328,7 +2324,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, ...@@ -2328,7 +2324,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
return -ENOMEM; return -ENOMEM;
switch (e->tunnel_type) { switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN: case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl4.flowi4_proto = IPPROTO_UDP; fl4.flowi4_proto = IPPROTO_UDP;
fl4.fl4_dport = tun_key->tp_dst; fl4.fl4_dport = tun_key->tp_dst;
break; break;
...@@ -2372,7 +2368,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, ...@@ -2372,7 +2368,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock); read_unlock_bh(&n->lock);
switch (e->tunnel_type) { switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN: case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header, gen_vxlan_header_ipv4(out_dev, encap_header,
ipv4_encap_size, e->h_dest, tos, ttl, ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr, fl4.daddr,
...@@ -2392,8 +2388,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, ...@@ -2392,8 +2388,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
goto out; goto out;
} }
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
ipv4_encap_size, encap_header, &e->encap_id); ipv4_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
if (err) if (err)
goto destroy_neigh_entry; goto destroy_neigh_entry;
...@@ -2437,7 +2435,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, ...@@ -2437,7 +2435,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
return -ENOMEM; return -ENOMEM;
switch (e->tunnel_type) { switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN: case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl6.flowi6_proto = IPPROTO_UDP; fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = tun_key->tp_dst; fl6.fl6_dport = tun_key->tp_dst;
break; break;
...@@ -2481,7 +2479,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, ...@@ -2481,7 +2479,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock); read_unlock_bh(&n->lock);
switch (e->tunnel_type) { switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN: case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header, gen_vxlan_header_ipv6(out_dev, encap_header,
ipv6_encap_size, e->h_dest, tos, ttl, ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr, &fl6.daddr,
...@@ -2502,8 +2500,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, ...@@ -2502,8 +2500,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto out; goto out;
} }
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
ipv6_encap_size, encap_header, &e->encap_id); ipv6_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
if (err) if (err)
goto destroy_neigh_entry; goto destroy_neigh_entry;
...@@ -2551,7 +2551,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, ...@@ -2551,7 +2551,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN; tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
} else { } else {
netdev_warn(priv->netdev, netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
...@@ -2726,7 +2726,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, ...@@ -2726,7 +2726,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info; parse_attr->tun_info = *info;
attr->parse_attr = parse_attr; attr->parse_attr = parse_attr;
action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT; MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */ /* attr->out_rep is resolved when we handle encap */
...@@ -2872,7 +2872,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, ...@@ -2872,7 +2872,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->flags |= MLX5E_TC_FLOW_OFFLOADED; flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
!(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) !(flow->esw_attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
kvfree(parse_attr); kvfree(parse_attr);
err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
......
...@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0; esw->enabled_vports = 0;
esw->mode = SRIOV_NONE; esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else else
......
...@@ -127,8 +127,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, ...@@ -127,8 +127,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id; flow_act.modify_id = attr->mod_hdr_id;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
flow_act.encap_id = attr->encap_id; flow_act.reformat_id = attr->encap_id;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
if (IS_ERR(rule)) if (IS_ERR(rule))
...@@ -529,7 +529,8 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) ...@@ -529,7 +529,8 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
esw_size >>= 1; esw_size >>= 1;
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN; flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
esw_size, esw_size,
...@@ -1244,7 +1245,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) ...@@ -1244,7 +1245,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return err; return err;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
......
...@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, ...@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *next_ft, struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags) unsigned int *table_id, u32 flags)
{ {
int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err; int err;
...@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, ...@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
} }
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_encap_decap); en_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en, MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap_decap); en_encap);
switch (op_mod) { switch (op_mod) {
case FS_FT_OP_MOD_NORMAL: case FS_FT_OP_MOD_NORMAL:
...@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action); MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
fte->action.reformat_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id, MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id); fte->action.modify_id);
...@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, ...@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets); *bytes = MLX5_GET64(traffic_counter, stats, octets);
} }
int mlx5_encap_alloc(struct mlx5_core_dev *dev, int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
int header_type, int reformat_type,
size_t size, size_t size,
void *encap_header, void *reformat_data,
u32 *encap_id) enum mlx5_flow_namespace_type namespace,
u32 *packet_reformat_id)
{ {
int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; void *packet_reformat_context_in;
void *encap_header_in; int max_encap_size;
void *header; void *reformat;
int inlen; int inlen;
int err; int err;
u32 *in; u32 *in;
if (namespace == MLX5_FLOW_NAMESPACE_FDB)
max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
if (size > max_encap_size) { if (size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size); size, max_encap_size);
return -EINVAL; return -EINVAL;
} }
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size, in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
GFP_KERNEL); GFP_KERNEL);
if (!in) if (!in)
return -ENOMEM; return -ENOMEM;
encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header); packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header); in, packet_reformat_context);
inlen = header - (void *)in + size; reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
inlen = reformat - (void *)in + size;
memset(in, 0, inlen); memset(in, 0, inlen);
MLX5_SET(alloc_encap_header_in, in, opcode, MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER); MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); reformat_data_size, size);
memcpy(header, encap_header, size); MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
memset(out, 0, sizeof(out)); memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
out, packet_reformat_id);
kfree(in); kfree(in);
return err; return err;
} }
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id) void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
u32 packet_reformat_id)
{ {
u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
memset(in, 0, sizeof(in)); memset(in, 0, sizeof(in));
MLX5_SET(dealloc_encap_header_in, in, opcode, MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
packet_reformat_id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions, u8 namespace, u8 num_actions,
...@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, ...@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
table_type = FS_FT_FDB; table_type = FS_FT_FDB;
break; break;
case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX; table_type = FS_FT_NIC_RX;
break; break;
case MLX5_FLOW_NAMESPACE_EGRESS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, ...@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
kfree(in); kfree(in);
return err; return err;
} }
EXPORT_SYMBOL(mlx5_modify_header_alloc);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{ {
...@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) ...@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
} }
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = { static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table, .create_flow_table = mlx5_cmd_create_flow_table,
...@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ ...@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_FDB: case FS_FT_FDB:
case FS_FT_SNIFFER_RX: case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX: case FS_FT_SNIFFER_TX:
return mlx5_fs_cmd_get_fw_cmds();
case FS_FT_NIC_TX: case FS_FT_NIC_TX:
return mlx5_fs_cmd_get_fw_cmds();
default: default:
return mlx5_fs_cmd_get_stub_cmds(); return mlx5_fs_cmd_get_stub_cmds();
} }
......
...@@ -76,6 +76,14 @@ ...@@ -76,6 +76,14 @@
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)) FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
#define FS_CHAINING_CAPS_EGRESS \
FS_REQUIRED_CAPS( \
FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
FS_CAP(flow_table_properties_nic_transmit.modify_root), \
FS_CAP(flow_table_properties_nic_transmit \
.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
#define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1 #define LEFTOVERS_NUM_PRIOS 1
...@@ -151,6 +159,17 @@ static struct init_tree_node { ...@@ -151,6 +159,17 @@ static struct init_tree_node {
} }
}; };
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 1,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
}
};
enum fs_i_lock_class { enum fs_i_lock_class {
FS_LOCK_GRANDPARENT, FS_LOCK_GRANDPARENT,
FS_LOCK_PARENT, FS_LOCK_PARENT,
...@@ -1388,7 +1407,7 @@ static bool check_conflicting_actions(u32 action1, u32 action2) ...@@ -1388,7 +1407,7 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
return false; return false;
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ENCAP | MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP | MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
...@@ -1978,7 +1997,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, ...@@ -1978,7 +1997,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
{ {
struct mlx5_flow_steering *steering = dev->priv.steering; struct mlx5_flow_steering *steering = dev->priv.steering;
struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *root_ns;
int prio; int prio = 0;
struct fs_prio *fs_prio; struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *ns;
...@@ -1986,40 +2005,29 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, ...@@ -1986,40 +2005,29 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return NULL; return NULL;
switch (type) { switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS:
case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type;
break;
case MLX5_FLOW_NAMESPACE_FDB: case MLX5_FLOW_NAMESPACE_FDB:
if (steering->fdb_root_ns) if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns; return &steering->fdb_root_ns->ns;
else return NULL;
return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX: case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns) if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns; return &steering->sniffer_rx_root_ns->ns;
else return NULL;
return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_TX: case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
if (steering->sniffer_tx_root_ns) if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns; return &steering->sniffer_tx_root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_EGRESS:
if (steering->egress_root_ns)
return &steering->egress_root_ns->ns;
else
return NULL;
default:
return NULL; return NULL;
default:
break;
}
if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
root_ns = steering->egress_root_ns;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
} }
root_ns = steering->root_ns;
if (!root_ns) if (!root_ns)
return NULL; return NULL;
...@@ -2535,16 +2543,23 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) ...@@ -2535,16 +2543,23 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
static int init_egress_root_ns(struct mlx5_flow_steering *steering) static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{ {
struct fs_prio *prio; int err;
steering->egress_root_ns = create_root_ns(steering, steering->egress_root_ns = create_root_ns(steering,
FS_FT_NIC_TX); FS_FT_NIC_TX);
if (!steering->egress_root_ns) if (!steering->egress_root_ns)
return -ENOMEM; return -ENOMEM;
/* create 1 prio*/ err = init_root_tree(steering, &egress_root_fs,
prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1); &steering->egress_root_ns->ns.node);
return PTR_ERR_OR_ZERO(prio); if (err)
goto cleanup;
set_prio_attrs(steering->egress_root_ns);
return 0;
cleanup:
cleanup_root_ns(steering->egress_root_ns);
steering->egress_root_ns = NULL;
return err;
} }
int mlx5_init_fs(struct mlx5_core_dev *dev) int mlx5_init_fs(struct mlx5_core_dev *dev)
...@@ -2612,7 +2627,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) ...@@ -2612,7 +2627,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err; goto err;
} }
if (MLX5_IPSEC_DEV(dev)) { if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering); err = init_egress_root_ns(steering);
if (err) if (err)
goto err; goto err;
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/if_link.h> #include <linux/if_link.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/mlx5/cq.h> #include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#define DRIVER_NAME "mlx5_core" #define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0" #define DRIVER_VERSION "5.0-0"
...@@ -169,17 +170,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); ...@@ -169,17 +170,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void); void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void); void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void); int mlx5_dev_list_trylock(void);
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int header_type,
size_t size,
void *encap_header,
u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions,
void *modify_actions, u32 *modify_header_id);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
......
...@@ -1120,6 +1120,12 @@ enum mlx5_qcam_feature_groups { ...@@ -1120,6 +1120,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
......
...@@ -163,10 +163,7 @@ enum mlx5_dcbx_oper_mode { ...@@ -163,10 +163,7 @@ enum mlx5_dcbx_oper_mode {
}; };
enum mlx5_dct_atomic_mode { enum mlx5_dct_atomic_mode {
MLX5_ATOMIC_MODE_DCT_OFF = 20, MLX5_ATOMIC_MODE_DCT_CX = 2,
MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
}; };
enum { enum {
......
...@@ -45,7 +45,8 @@ enum { ...@@ -45,7 +45,8 @@ enum {
}; };
enum { enum {
MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
}; };
#define LEFTOVERS_RULE_NUM 2 #define LEFTOVERS_RULE_NUM 2
...@@ -159,7 +160,7 @@ struct mlx5_flow_act { ...@@ -159,7 +160,7 @@ struct mlx5_flow_act {
u32 action; u32 action;
bool has_flow_tag; bool has_flow_tag;
u32 flow_tag; u32 flow_tag;
u32 encap_id; u32 reformat_id;
u32 modify_id; u32 modify_id;
uintptr_t esp_id; uintptr_t esp_id;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
...@@ -196,4 +197,19 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, ...@@ -196,4 +197,19 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions,
void *modify_actions, u32 *modify_header_id);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
u32 modify_header_id);
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
int reformat_type,
size_t size,
void *reformat_data,
enum mlx5_flow_namespace_type namespace,
u32 *packet_reformat_id);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
u32 packet_reformat_id);
#endif #endif
...@@ -243,8 +243,8 @@ enum { ...@@ -243,8 +243,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
...@@ -336,7 +336,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits { ...@@ -336,7 +336,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1]; u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1]; u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1]; u8 flow_table_modify[0x1];
u8 encap[0x1]; u8 reformat[0x1];
u8 decap[0x1]; u8 decap[0x1];
u8 reserved_at_9[0x1]; u8 reserved_at_9[0x1];
u8 pop_vlan[0x1]; u8 pop_vlan[0x1];
...@@ -344,8 +344,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits { ...@@ -344,8 +344,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_c[0x1]; u8 reserved_at_c[0x1];
u8 pop_vlan_2[0x1]; u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1]; u8 push_vlan_2[0x1];
u8 reserved_at_f[0x11]; u8 reformat_and_vlan_action[0x1];
u8 reserved_at_10[0x2];
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
u8 reserved_at_14[0xb];
u8 reserved_at_20[0x2]; u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6]; u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8]; u8 log_max_modify_header_context[0x8];
...@@ -554,7 +558,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits { ...@@ -554,7 +558,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
u8 reserved_at_3[0x1fd]; u8 reserved_at_3[0x1d];
u8 encap_general_header[0x1];
u8 reserved_at_21[0xa];
u8 log_max_packet_reformat_context[0x5];
u8 reserved_at_30[0x6];
u8 max_encap_header_size[0xa];
u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
...@@ -599,7 +609,7 @@ struct mlx5_ifc_e_switch_cap_bits { ...@@ -599,7 +609,7 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vxlan_encap_decap[0x1]; u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1]; u8 nvgre_encap_decap[0x1];
u8 reserved_at_22[0x9]; u8 reserved_at_22[0x9];
u8 log_max_encap_headers[0x5]; u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6]; u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa]; u8 max_encap_header_size[0xa];
...@@ -2394,7 +2404,7 @@ enum { ...@@ -2394,7 +2404,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
...@@ -2427,7 +2437,7 @@ struct mlx5_ifc_flow_context_bits { ...@@ -2427,7 +2437,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8]; u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18]; u8 flow_counter_list_size[0x18];
u8 encap_id[0x20]; u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20]; u8 modify_header_id[0x20];
...@@ -4802,19 +4812,19 @@ struct mlx5_ifc_query_eq_in_bits { ...@@ -4802,19 +4812,19 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
}; };
struct mlx5_ifc_encap_header_in_bits { struct mlx5_ifc_packet_reformat_context_in_bits {
u8 reserved_at_0[0x5]; u8 reserved_at_0[0x5];
u8 header_type[0x3]; u8 reformat_type[0x3];
u8 reserved_at_8[0xe]; u8 reserved_at_8[0xe];
u8 encap_header_size[0xa]; u8 reformat_data_size[0xa];
u8 reserved_at_20[0x10]; u8 reserved_at_20[0x10];
u8 encap_header[2][0x8]; u8 reformat_data[2][0x8];
u8 more_encap_header[0][0x8]; u8 more_reformat_data[0][0x8];
}; };
struct mlx5_ifc_query_encap_header_out_bits { struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_at_8[0x18]; u8 reserved_at_8[0x18];
...@@ -4822,33 +4832,41 @@ struct mlx5_ifc_query_encap_header_out_bits { ...@@ -4822,33 +4832,41 @@ struct mlx5_ifc_query_encap_header_out_bits {
u8 reserved_at_40[0xa0]; u8 reserved_at_40[0xa0];
struct mlx5_ifc_encap_header_in_bits encap_header[0]; struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
}; };
struct mlx5_ifc_query_encap_header_in_bits { struct mlx5_ifc_query_packet_reformat_context_in_bits {
u8 opcode[0x10]; u8 opcode[0x10];
u8 reserved_at_10[0x10]; u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10]; u8 reserved_at_20[0x10];
u8 op_mod[0x10]; u8 op_mod[0x10];
u8 encap_id[0x20]; u8 packet_reformat_id[0x20];
u8 reserved_at_60[0xa0]; u8 reserved_at_60[0xa0];
}; };
struct mlx5_ifc_alloc_encap_header_out_bits { struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_at_8[0x18]; u8 reserved_at_8[0x18];
u8 syndrome[0x20]; u8 syndrome[0x20];
u8 encap_id[0x20]; u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20]; u8 reserved_at_60[0x20];
}; };
struct mlx5_ifc_alloc_encap_header_in_bits { enum {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
u8 opcode[0x10]; u8 opcode[0x10];
u8 reserved_at_10[0x10]; u8 reserved_at_10[0x10];
...@@ -4857,10 +4875,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits { ...@@ -4857,10 +4875,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
u8 reserved_at_40[0xa0]; u8 reserved_at_40[0xa0];
struct mlx5_ifc_encap_header_in_bits encap_header; struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
}; };
struct mlx5_ifc_dealloc_encap_header_out_bits { struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
u8 status[0x8]; u8 status[0x8];
u8 reserved_at_8[0x18]; u8 reserved_at_8[0x18];
...@@ -4869,14 +4887,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits { ...@@ -4869,14 +4887,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x40];
}; };
struct mlx5_ifc_dealloc_encap_header_in_bits { struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
u8 opcode[0x10]; u8 opcode[0x10];
u8 reserved_at_10[0x10]; u8 reserved_at_10[0x10];
u8 reserved_20[0x10]; u8 reserved_20[0x10];
u8 op_mod[0x10]; u8 op_mod[0x10];
u8 encap_id[0x20]; u8 packet_reformat_id[0x20];
u8 reserved_60[0x20]; u8 reserved_60[0x20];
}; };
...@@ -6978,7 +6996,7 @@ struct mlx5_ifc_create_flow_table_out_bits { ...@@ -6978,7 +6996,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
}; };
struct mlx5_ifc_flow_table_context_bits { struct mlx5_ifc_flow_table_context_bits {
u8 encap_en[0x1]; u8 reformat_en[0x1];
u8 decap_en[0x1]; u8 decap_en[0x1];
u8 reserved_at_2[0x2]; u8 reserved_at_2[0x2];
u8 table_miss_action[0x4]; u8 table_miss_action[0x4];
......
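
With the PRM fields renamed from "encap" to "packet reformat", mlx5_core now exports the allocation helpers so the RDMA side can create reformat contexts itself (per "net/mlx5: Export packet reformat alloc/dealloc functions" and "Pass a namespace for packet reformat ID allocation"). A hedged sketch of a caller is below; the prototypes and the mlx5_flow_act field name are assumptions taken from the patch titles, so check include/linux/mlx5/fs.h for the authoritative signatures, and "mdev", "vxlan_hdr" and "vxlan_hdr_len" are placeholders.

/* Hedged sketch: allocate a VXLAN encap reformat context in a given
 * namespace, then free it. Prototypes assumed from this series.
 */
static int example_alloc_vxlan_reformat(struct mlx5_core_dev *mdev,
					void *vxlan_hdr, size_t vxlan_hdr_len)
{
	u32 reformat_id;
	int err;

	err = mlx5_packet_reformat_alloc(mdev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					 vxlan_hdr_len, vxlan_hdr,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &reformat_id);
	if (err)
		return err;

	/* ... attach reformat_id to a steering rule (e.g. via struct
	 * mlx5_flow_act) before tearing it down ...
	 */

	mlx5_packet_reformat_dealloc(mdev, reformat_id);
	return 0;
}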
...@@ -365,6 +365,15 @@ struct uverbs_object_tree_def { ...@@ -365,6 +365,15 @@ struct uverbs_object_tree_def {
__VA_ARGS__ }, \ __VA_ARGS__ }, \
}) })
/* An input value that is a member in the enum _enum_type. */
#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \
UVERBS_ATTR_PTR_IN( \
_attr_id, \
UVERBS_ATTR_SIZE( \
sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \
sizeof(u64)), \
__VA_ARGS__)
/* /*
* An input value that is a bitwise combination of values of _enum_type. * An input value that is a bitwise combination of values of _enum_type.
* This permits the flag value to be passed as either a u32 or u64, it must * This permits the flag value to be passed as either a u32 or u64, it must
...@@ -603,6 +612,9 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, ...@@ -603,6 +612,9 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{ {
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO); return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
} }
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val);
#else #else
static inline int static inline int
uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
...@@ -631,6 +643,34 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, ...@@ -631,6 +643,34 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{ {
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
static inline int
_uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, s64 lower_bound, u64 upper_bound,
s64 *def_val)
{
return -EINVAL;
}
#endif #endif
#define uverbs_get_const(_to, _attrs_bundle, _idx) \
({ \
s64 _val; \
int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
type_min(typeof(*_to)), \
type_max(typeof(*_to)), NULL); \
(*_to) = _val; \
_ret; \
})
#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
({ \
s64 _val; \
s64 _def_val = _default; \
int _ret = \
_uverbs_get_const(&_val, _attrs_bundle, _idx, \
type_min(typeof(*_to)), \
type_max(typeof(*_to)), &_def_val); \
(*_to) = _val; \
_ret; \
})
#endif #endif
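
The new uverbs_get_const()/uverbs_get_const_default() helpers pair with UVERBS_ATTR_CONST_IN attributes: the handler reads the enum-typed value into a destination variable and the helper range-checks it against that variable's type. A hedged, illustrative fragment follows; the attribute IDs come from the mlx5 patches in this series, but the helper function and the optional/default split are assumptions, not the driver's actual code.

/* Illustrative only: reading UVERBS_ATTR_CONST_IN attributes in a
 * method handler. Bounds checking against the destination type is
 * done inside uverbs_get_const() itself.
 */
static int example_read_reformat_attrs(struct uverbs_attr_bundle *attrs,
					u8 *reformat_type, u8 *ft_type)
{
	int ret;

	/* Returns an error if userspace omitted the attribute. */
	ret = uverbs_get_const(reformat_type, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
	if (ret)
		return ret;

	/* Optional attribute with a fallback value. */
	return uverbs_get_const_default(ft_type, attrs,
					MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
					MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX);
}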
...@@ -140,5 +140,17 @@ __uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, ...@@ -140,5 +140,17 @@ __uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile,
#define uobj_alloc(_type, _ufile, _ib_dev) \ #define uobj_alloc(_type, _ufile, _ib_dev) \
__uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev) __uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev)
static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action,
struct ib_uobject *uobj,
struct ib_device *ib_dev,
enum ib_flow_action_type type)
{
atomic_set(&action->usecnt, 0);
action->device = ib_dev;
action->type = type;
action->uobject = uobj;
uobj->object = action;
}
#endif #endif
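
uverbs_flow_action_fill_action() centralizes the bookkeeping that was previously done inline in the ESP create handler, so the new modify-header and packet-reformat verbs can reuse it. A hedged sketch of a create handler calling it; the allocation pattern and the action type value are assumptions, not the exact mlx5_ib code.

/* Illustrative sketch of a driver create-flow-action handler wiring up
 * a freshly allocated ib_flow_action with the new helper.
 */
static int example_create_action(struct ib_uobject *uobj,
				 struct ib_device *ib_dev)
{
	struct ib_flow_action *action;

	action = kzalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	/* Assumed action type for non-ESP actions. */
	uverbs_flow_action_fill_action(action, uobj, ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);
	return 0;
}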
...@@ -166,4 +166,22 @@ enum mlx5_ib_flow_methods { ...@@ -166,4 +166,22 @@ enum mlx5_ib_flow_methods {
MLX5_IB_METHOD_DESTROY_FLOW, MLX5_IB_METHOD_DESTROY_FLOW,
}; };
enum mlx5_ib_flow_action_methods {
MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
};
enum mlx5_ib_create_flow_action_create_modify_header_attrs {
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
};
enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
};
#endif #endif
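
On the kernel side these method and attribute IDs get bound to a uverbs method specification, and the reformat/flow-table type attributes are natural users of UVERBS_ATTR_CONST_IN. A hedged sketch of such a declaration is below; the exact flags, companion attributes (e.g. the data buffer) and surrounding tree in the real mlx5_ib spec may differ.

/* Sketch of a method spec using the new attribute IDs; illustrative,
 * not the exact mlx5_ib declaration.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
			     enum mlx5_ib_uapi_flow_action_packet_reformat_type,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));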
...@@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags { ...@@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags {
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0, MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
}; };
enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 = 0x0,
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x1,
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x2,
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
};
#endif #endif
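
The UAPI reformat types mirror, but are deliberately decoupled from, the PRM MLX5_REFORMAT_TYPE_* values, so the driver translates one into the other. A hedged sketch of such a mapping; the helper name is hypothetical, and it omits L2_TUNNEL_TO_L2, which has no direct counterpart in the PRM reformat enum above.

/* Hypothetical helper mapping UAPI reformat types to PRM values. */
static int example_reformat_type_to_prm(u8 uapi_type, u8 *prm_type)
{
	switch (uapi_type) {
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		*prm_type = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
		return 0;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		*prm_type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
		return 0;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		*prm_type = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
		return 0;
	default:
		return -EINVAL;
	}
}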