Commit d44a919a authored by David S. Miller

Merge tag 'mlx5-updates-2020-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-07-16

Fixes:
1) Fix build break when CONFIG_XPS is not set
2) Fix missing switch_id for representors

Updates:
1) IPsec XFRM RX offloads from Raed and Huy.
  - Added IPSec RX steering flow tables to NIC RX
  - Refactoring of the existing FPGA IPSec, to add support
    for ConnectX IPsec.
  - RX data path handling for IPSec traffic
  - Synchronize offloading device ESN with xfrm received SN

2) Parav allows E-Switch to switch to switchdev mode directly, without
   the need to go through legacy mode first.

3) From Tariq, Misc updates including:
   3.1) indirect calls for RX and XDP handlers
   3.2) Make MLX5_EN_TLS non-prompt as it should always be enabled when
        TLS and MLX5_EN are selected.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 721dab2b 54b154ec
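A note on update 3.1: the "indirect calls" below use the helpers from
include/linux/indirect_call_wrapper.h. A minimal sketch of the pattern, with
the call taken from the mlx5e XDP hunks in this merge (the expansion in the
comment is approximate):

	/* INDIRECT_CALL_2(f, f2, f1, ...) expands to roughly:
	 *	likely(f == f2) ? f2(...) : (likely(f == f1) ? f1(...) : f(...))
	 * so the two expected targets are compared and called directly,
	 * avoiding a retpoline-protected indirect branch on the fast path.
	 */
	ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
			      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);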
@@ -134,12 +134,25 @@ config MLX5_FPGA_IPSEC
 	  mlx5_core driver will include the Innova FPGA core and allow building
 	  sandbox-specific client drivers.

+config MLX5_IPSEC
+	bool "Mellanox Technologies IPsec Connect-X support"
+	depends on MLX5_CORE_EN
+	depends on XFRM_OFFLOAD
+	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+	select MLX5_ACCEL
+	default n
+	help
+	  Build IPsec support for the Connect-X family of network cards by Mellanox
+	  Technologies.
+	  Note: If you select this option, the mlx5_core driver will include
+	  IPsec support for the Connect-X family.
+
 config MLX5_EN_IPSEC
 	bool "IPSec XFRM cryptography-offload acceleration"
 	depends on MLX5_CORE_EN
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-	depends on MLX5_FPGA_IPSEC
+	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
 	default n
 	help
 	  Build support for IPsec cryptography-offload acceleration in the NIC.
@@ -150,7 +163,10 @@ config MLX5_FPGA_TLS
 	bool "Mellanox Technologies TLS Innova support"
 	depends on TLS_DEVICE
 	depends on TLS=y || MLX5_CORE=m
+	depends on MLX5_CORE_EN
 	depends on MLX5_FPGA
+	depends on XPS
+	select MLX5_EN_TLS
 	default n
 	help
 	  Build TLS support for the Innova family of network cards by Mellanox
@@ -161,21 +177,19 @@ config MLX5_FPGA_TLS
 config MLX5_TLS
 	bool "Mellanox Technologies TLS Connect-X support"
-	depends on MLX5_CORE_EN
 	depends on TLS_DEVICE
 	depends on TLS=y || MLX5_CORE=m
+	depends on MLX5_CORE_EN
+	depends on XPS
 	select MLX5_ACCEL
+	select MLX5_EN_TLS
 	default n
 	help
 	  Build TLS support for the Connect-X family of network cards by Mellanox
 	  Technologies.

 config MLX5_EN_TLS
-	bool "TLS cryptography-offload acceleration"
-	depends on MLX5_CORE_EN
-	depends on XPS
-	depends on MLX5_FPGA_TLS || MLX5_TLS
-	default y
+	bool
 	help
 	  Build support for TLS cryptography-offload acceleration in the NIC.
 	  Note: Support for hardware with this capability needs to be selected
......
@@ -64,6 +64,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
 #
 # Accelerations & FPGA
 #
+mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
 mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
 mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
 mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
@@ -71,7 +72,7 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
 mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
-					en_accel/ipsec_stats.o
+					en_accel/ipsec_stats.o en_accel/ipsec_fs.o
 mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
 					en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
......
@@ -31,37 +31,88 @@
  *
  */

-#ifdef CONFIG_MLX5_FPGA_IPSEC
 #include <linux/mlx5/device.h>

 #include "accel/ipsec.h"
 #include "mlx5_core.h"
 #include "fpga/ipsec.h"
+#include "accel/ipsec_offload.h"
+
+void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+	const struct mlx5_accel_ipsec_ops *ipsec_ops;
+	int err = 0;
+
+	ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
+		     mlx5_ipsec_offload_ops(mdev) :
+		     mlx5_fpga_ipsec_ops(mdev);
+
+	if (!ipsec_ops || !ipsec_ops->init) {
+		mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
+		return;
+	}
+
+	err = ipsec_ops->init(mdev);
+	if (err) {
+		mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
+		return;
+	}
+
+	mdev->ipsec_ops = ipsec_ops;
+}
+
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->cleanup)
+		return;
+
+	ipsec_ops->cleanup(mdev);
+}

 u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
 {
-	return mlx5_fpga_ipsec_device_caps(mdev);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->device_caps)
+		return 0;
+
+	return ipsec_ops->device_caps(mdev);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);

 unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
 {
-	return mlx5_fpga_ipsec_counters_count(mdev);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->counters_count)
+		return -EOPNOTSUPP;
+
+	return ipsec_ops->counters_count(mdev);
 }

 int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 				   unsigned int count)
 {
-	return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->counters_read)
+		return -EOPNOTSUPP;
+
+	return ipsec_ops->counters_read(mdev, counters, count);
 }

 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 				       struct mlx5_accel_esp_xfrm *xfrm,
 				       u32 *sa_handle)
 {
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
 	__be32 saddr[4] = {}, daddr[4] = {};

+	if (!ipsec_ops || !ipsec_ops->create_hw_context)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (!xfrm->attrs.is_ipv6) {
 		saddr[3] = xfrm->attrs.saddr.a4;
 		daddr[3] = xfrm->attrs.daddr.a4;
@@ -70,29 +121,18 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 		memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
 	}

-	return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
-					     daddr, xfrm->attrs.spi,
+	return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
 					    xfrm->attrs.is_ipv6, sa_handle);
 }

-void mlx5_accel_esp_free_hw_context(void *context)
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
 {
-	mlx5_fpga_ipsec_delete_sa_ctx(context);
-}
-
-int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
-	return mlx5_fpga_ipsec_init(mdev);
-}
-
-void mlx5_accel_ipsec_build_fs_cmds(void)
-{
-	mlx5_fpga_ipsec_build_fs_cmds();
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
-	mlx5_fpga_ipsec_cleanup(mdev);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->free_hw_context)
+		return;
+
+	ipsec_ops->free_hw_context(context);
 }

 struct mlx5_accel_esp_xfrm *
@@ -100,9 +140,13 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
 			   const struct mlx5_accel_esp_xfrm_attrs *attrs,
 			   u32 flags)
 {
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
 	struct mlx5_accel_esp_xfrm *xfrm;

-	xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
+	if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
 	if (IS_ERR(xfrm))
 		return xfrm;
@@ -113,15 +157,23 @@ EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
 void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
 {
-	mlx5_fpga_esp_destroy_xfrm(xfrm);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
+		return;
+
+	ipsec_ops->esp_destroy_xfrm(xfrm);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);

 int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
 			       const struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
-	return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
+	const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
+
+	if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
+		return -EOPNOTSUPP;
+
+	return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
-#endif
@@ -37,7 +37,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/accel.h>

-#ifdef CONFIG_MLX5_FPGA_IPSEC
+#ifdef CONFIG_MLX5_ACCEL
 #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
 			      MLX5_ACCEL_IPSEC_CAP_DEVICE)
@@ -49,12 +49,30 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 				       struct mlx5_accel_esp_xfrm *xfrm,
 				       u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(void *context);
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);

-int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_build_fs_cmds(void);
+void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
 void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);

+struct mlx5_accel_ipsec_ops {
+	u32 (*device_caps)(struct mlx5_core_dev *mdev);
+	unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
+	int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
+	void* (*create_hw_context)(struct mlx5_core_dev *mdev,
+				   struct mlx5_accel_esp_xfrm *xfrm,
+				   const __be32 saddr[4], const __be32 daddr[4],
+				   const __be32 spi, bool is_ipv6, u32 *sa_handle);
+	void (*free_hw_context)(void *context);
+	int (*init)(struct mlx5_core_dev *mdev);
+	void (*cleanup)(struct mlx5_core_dev *mdev);
+	struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
+						       const struct mlx5_accel_esp_xfrm_attrs *attrs,
+						       u32 flags);
+	int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
+			       const struct mlx5_accel_esp_xfrm_attrs *attrs);
+	void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
+};
+
 #else
 #define MLX5_IPSEC_DEV(mdev) false
@@ -67,23 +85,12 @@ mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
 	return NULL;
 }

-static inline void mlx5_accel_esp_free_hw_context(void *context)
-{
-}
+static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}

-static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
-	return 0;
-}
+static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}

-static inline void mlx5_accel_ipsec_build_fs_cmds(void)
-{
-}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
-}
+static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}

-#endif
+#endif /* CONFIG_MLX5_ACCEL */

 #endif /* __MLX5_ACCEL_IPSEC_H__ */
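Taken together, the wrappers in accel/ipsec.c and the ops table above give one
entry point per operation, with the backend chosen at init time. A hedged
sketch of the intended call flow (error handling elided; mdev, attrs and flags
stand in for caller state):

	struct mlx5_accel_esp_xfrm *xfrm;
	void *hw_ctx;
	u32 sa_handle;

	/* picks the ConnectX ipsec_offload ops when the device reports
	 * support, otherwise falls back to the FPGA (Innova) ops */
	mlx5_accel_ipsec_init(mdev);

	xfrm = mlx5_accel_esp_create_xfrm(mdev, &attrs, flags);	/* ->esp_create_xfrm */
	hw_ctx = mlx5_accel_esp_create_hw_context(mdev, xfrm, &sa_handle); /* ->create_hw_context */

	/* ... offloaded traffic runs against sa_handle ... */

	mlx5_accel_esp_free_hw_context(mdev, hw_ctx);	/* ->free_hw_context */
	mlx5_accel_esp_destroy_xfrm(xfrm);		/* ->esp_destroy_xfrm */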
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
MLX5_ACCEL_IPSEC_CAP_LSO)
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
u32 ipsec_obj_id;
/* hw ctx */
struct mlx5_core_dev *dev;
struct mlx5_ipsec_esp_xfrm *mxfrm;
};
struct mlx5_ipsec_esp_xfrm {
/* reference counter of SA ctx */
struct mlx5_ipsec_sa_ctx *sa_ctx;
struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
struct mlx5_accel_esp_xfrm accel_xfrm;
};
static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
if (!mlx5_is_ipsec_device(mdev))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bit in flow table metadata
* to hold the IPsec Object unique handle.
*/
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
attrs->replay_type);
return -EOPNOTSUPP;
}
if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
attrs->keymat_type);
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.iv_algo !=
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
attrs->keymat.aes_gcm.iv_algo);
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.key_len != 128 &&
attrs->keymat.aes_gcm.key_len != 256) {
mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
attrs->keymat.aes_gcm.key_len);
return -EOPNOTSUPP;
}
if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
!MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
return -EOPNOTSUPP;
}
return 0;
}
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
if (err)
return ERR_PTR(err);
mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
if (!mxfrm)
return ERR_PTR(-ENOMEM);
mutex_init(&mxfrm->lock);
memcpy(&mxfrm->accel_xfrm.attrs, attrs,
sizeof(mxfrm->accel_xfrm.attrs));
return &mxfrm->accel_xfrm;
}
static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
accel_xfrm);
/* assuming no sa_ctx are connected to this xfrm_ctx */
WARN_ON(mxfrm->sa_ctx);
kfree(mxfrm);
}
struct mlx5_ipsec_obj_attrs {
const struct aes_gcm_keymat *aes_gcm;
u32 accel_flags;
u32 esn_msb;
u32 enc_key_id;
};
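/* Issue MLX5_CMD_OP_CREATE_GENERAL_OBJECT for an IPSEC object built from
 * attrs; on success the new object id is returned through ipsec_id.
 */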
static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 *ipsec_id)
{
const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
/* salt and seq_iv */
salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
switch (aes_gcm->icv_len) {
case 64:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_8B);
break;
case 96:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_12B);
break;
case 128:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_16B);
break;
default:
return -EINVAL;
}
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
}
MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return err;
}
static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
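/* Build the hardware SA: create a DEK from the AES-GCM key material, then an
 * IPSEC object referencing it; the object id doubles as the hw_handle that is
 * returned to the caller.
 */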
static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *hw_handle)
{
struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
struct mlx5_ipsec_esp_xfrm *mxfrm;
struct mlx5_ipsec_sa_ctx *sa_ctx;
int err;
/* alloc SA context */
sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
if (!sa_ctx)
return ERR_PTR(-ENOMEM);
sa_ctx->dev = mdev;
mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
mutex_lock(&mxfrm->lock);
sa_ctx->mxfrm = mxfrm;
/* key */
err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
aes_gcm->key_len / BITS_PER_BYTE,
MLX5_ACCEL_OBJ_IPSEC_KEY,
&sa_ctx->enc_key_id);
if (err) {
mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
goto err_sa_ctx;
}
ipsec_attrs.aes_gcm = aes_gcm;
ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
&sa_ctx->ipsec_obj_id);
if (err) {
mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
goto err_enc_key;
}
*hw_handle = sa_ctx->ipsec_obj_id;
mxfrm->sa_ctx = sa_ctx;
mutex_unlock(&mxfrm->lock);
return sa_ctx;
err_enc_key:
mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
err_sa_ctx:
mutex_unlock(&mxfrm->lock);
kfree(sa_ctx);
return ERR_PTR(err);
}
static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
{
struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
mutex_lock(&mxfrm->lock);
mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
kfree(sa_ctx);
mxfrm->sa_ctx = NULL;
mutex_unlock(&mxfrm->lock);
}
static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
{
return 0;
}
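/* Push new ESN state (esn_msb/overlap) to a live IPSEC object: first query
 * which fields the device allows modifying, then issue
 * MLX5_CMD_OP_MODIFY_GENERAL_OBJECT.
 */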
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 ipsec_id)
{
u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
u64 modify_field_select = 0;
u64 general_obj_types;
void *obj;
int err;
if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return -EINVAL;
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
ipsec_id, err);
return err;
}
obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
/* esn */
if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
return -EOPNOTSUPP;
obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
struct mlx5_core_dev *mdev = xfrm->mdev;
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
return -EOPNOTSUPP;
mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
mutex_lock(&mxfrm->lock);
if (!mxfrm->sa_ctx)
/* Not bound xfrm, change only sw attrs */
goto change_sw_xfrm_attrs;
/* need to add find and replace in ipsec_rhash_sa the sa_ctx */
/* modify device with new hw_sa */
ipsec_attrs.accel_flags = attrs->flags;
ipsec_attrs.esn_msb = attrs->esn;
err = mlx5_modify_ipsec_obj(mdev,
&ipsec_attrs,
mxfrm->sa_ctx->ipsec_obj_id);
change_sw_xfrm_attrs:
if (!err)
memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
mutex_unlock(&mxfrm->lock);
return err;
}
static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
.device_caps = mlx5_ipsec_offload_device_caps,
.create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
.free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
.init = mlx5_ipsec_offload_init,
.esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
.esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
.esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
{
if (!mlx5_ipsec_offload_device_caps(mdev))
return NULL;
return &ipsec_offload_ops;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include "accel/ipsec.h"
#ifdef CONFIG_MLX5_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return false;
return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer);
}
#else
static inline const struct mlx5_accel_ipsec_ops *
mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
return false;
}
#endif /* CONFIG_MLX5_IPSEC */
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
@@ -113,7 +113,9 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
 		return -EINVAL;
 	}

-	return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
+	return mlx5_create_encryption_key(mdev, key, sz_bytes,
+					  MLX5_ACCEL_OBJ_TLS_KEY,
+					  p_key_id);
 }

 void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
......
@@ -129,7 +129,11 @@ enum {
 	MLX5E_ACCEL_FS_TCP_FT_LEVEL,
 #endif
 #ifdef CONFIG_MLX5_EN_ARFS
-	MLX5E_ARFS_FT_LEVEL
+	MLX5E_ARFS_FT_LEVEL,
+#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+	MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+	MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
 #endif
 };
......
@@ -32,6 +32,9 @@
 #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
 #define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX

+#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
+#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+
 #define ct_dbg(fmt, args...)\
 	netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
@@ -48,6 +51,7 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_flow_table *post_ct;
 	struct mutex control_lock; /* guards parallel adds/dels */
 	struct mapping_ctx *zone_mapping;
+	struct mapping_ctx *labels_mapping;
 };

 struct mlx5_ct_flow {
@@ -404,6 +408,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
 	mlx5e_mod_hdr_detach(ct_priv->esw->dev,
 			     &esw->offloads.mod_hdr, zone_rule->mh);
+	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
 }

 static void
@@ -436,7 +441,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
 			       struct mlx5e_tc_mod_hdr_acts *mod_acts,
 			       u8 ct_state,
 			       u32 mark,
-			       u32 label,
+			       u32 labels_id,
 			       u8 zone_restore_id)
 {
 	struct mlx5_eswitch *esw = ct_priv->esw;
@@ -453,7 +458,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
 		return err;

 	err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
-					LABELS_TO_REG, label);
+					LABELS_TO_REG, labels_id);
 	if (err)
 		return err;
@@ -597,13 +602,10 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	if (!meta)
 		return -EOPNOTSUPP;

-	if (meta->ct_metadata.labels[1] ||
-	    meta->ct_metadata.labels[2] ||
-	    meta->ct_metadata.labels[3]) {
-		ct_dbg("Failed to offload ct entry due to unsupported label");
+	err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
+			  &attr->ct_attr.ct_labels_id);
+	if (err)
 		return -EOPNOTSUPP;
-	}

 	if (nat) {
 		err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
 						  &mod_acts);
@@ -617,7 +619,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
 					     ct_state,
 					     meta->ct_metadata.mark,
-					     meta->ct_metadata.labels[0],
+					     attr->ct_attr.ct_labels_id,
 					     zone_restore_id);
 	if (err)
 		goto err_mapping;
@@ -637,6 +639,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 err_mapping:
 	dealloc_mod_hdr_actions(&mod_acts);
+	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
 	return err;
 }
@@ -959,6 +962,7 @@ int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 		       struct mlx5_flow_spec *spec,
 		       struct flow_cls_offload *f,
+		       struct mlx5_ct_attr *ct_attr,
 		       struct netlink_ext_ack *extack)
 {
 	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
@@ -969,6 +973,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 	u16 ct_state_on, ct_state_off;
 	u16 ct_state, ct_state_mask;
 	struct flow_match_ct match;
+	u32 ct_labels[4];

 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
 		return 0;
@@ -995,12 +1000,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}

-	if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "only lower 32bits of ct_labels are supported for offload");
-		return -EOPNOTSUPP;
-	}
-
 	ct_state_on = ct_state & ct_state_mask;
 	ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
 	trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
@@ -1029,10 +1028,17 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 	if (mask->ct_mark)
 		mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
 					    key->ct_mark, mask->ct_mark);
-	if (mask->ct_labels[0])
-		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
-					    key->ct_labels[0],
-					    mask->ct_labels[0]);
+	if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] ||
+	    mask->ct_labels[3]) {
+		ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0];
+		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
+		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
+		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
+		if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+			return -EOPNOTSUPP;
+		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
+					    MLX5_CT_LABELS_MASK);
+	}

 	return 0;
 }
@@ -1398,7 +1404,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
  * + tuple + zone match +
  * +--------------------+
  * | set mark
- * | set label
+ * | set labels_id
  * | set established
  * | set zone_restore
  * | do nat (if needed)
@@ -1789,7 +1795,13 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
 	ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
 	if (IS_ERR(ct_priv->zone_mapping)) {
 		err = PTR_ERR(ct_priv->zone_mapping);
-		goto err_mapping;
+		goto err_mapping_zone;
+	}
+
+	ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true);
+	if (IS_ERR(ct_priv->labels_mapping)) {
+		err = PTR_ERR(ct_priv->labels_mapping);
+		goto err_mapping_labels;
 	}

 	ct_priv->esw = esw;
@@ -1833,8 +1845,10 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
 err_ct_nat_tbl:
 	mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
 err_ct_tbl:
+	mapping_destroy(ct_priv->labels_mapping);
+err_mapping_labels:
 	mapping_destroy(ct_priv->zone_mapping);
-err_mapping:
+err_mapping_zone:
 	kfree(ct_priv);
 err_alloc:
 err_support:
@@ -1854,6 +1868,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 	mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
 	mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
 	mapping_destroy(ct_priv->zone_mapping);
+	mapping_destroy(ct_priv->labels_mapping);
 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
......
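The ct_labels rework above addresses a register-width limit: conntrack labels
are 128 bits, but LABELS_TO_REG can only match the 32 bits implied by the new
MLX5_CT_LABELS_BITS macro. Instead of rejecting filters that touch the upper
96 bits, the masked 16-byte label key is now interned into an id via the
mapping library and the register match is done on that id. A short sketch of
the idea, using only calls from the hunks above:

	u32 ct_labels[4];	/* masked 128-bit label key */
	u32 id;

	/* labels_mapping was created with mapping_create(sizeof(u32) * 4, 0, true),
	 * so identical keys yield the same id and removal is refcounted/delayed */
	if (mapping_add(ct_priv->labels_mapping, ct_labels, &id))
		return -EOPNOTSUPP;
	mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, id, MLX5_CT_LABELS_MASK);
	...
	mapping_remove(ct_priv->labels_mapping, id);	/* on rule teardown */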
@@ -25,6 +25,7 @@ struct mlx5_ct_attr {
 	u16 ct_action;
 	struct mlx5_ct_flow *ct_flow;
 	struct nf_flowtable *nf_ft;
+	u32 ct_labels_id;
 };

 #define zone_to_reg_ct {\
@@ -90,6 +91,7 @@ int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 		       struct mlx5_flow_spec *spec,
 		       struct flow_cls_offload *f,
+		       struct mlx5_ct_attr *ct_attr,
 		       struct netlink_ext_ack *extack);
 int
 mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
@@ -132,6 +134,7 @@ static inline int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 		       struct mlx5_flow_spec *spec,
 		       struct flow_cls_offload *f,
+		       struct mlx5_ct_attr *ct_attr,
 		       struct netlink_ext_ack *extack)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
......
@@ -34,6 +34,7 @@
 #include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"
+#include <linux/indirect_call_wrapper.h>

 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
@@ -114,7 +115,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		xdpi.page.di = *di;
 	}

-	return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);
+	return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+			       mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
 }

 /* returns true if packet was consumed by xdp */
@@ -237,7 +239,7 @@ enum {
 	MLX5E_XDP_CHECK_START_MPWQE = 2,
 };

-static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
 {
 	if (unlikely(!sq->mpwqe.wqe)) {
 		const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
@@ -256,10 +258,9 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
 	return MLX5E_XDP_CHECK_OK;
 }

-static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
-				       struct mlx5e_xdp_xmit_data *xdptxd,
-				       struct mlx5e_xdp_info *xdpi,
-				       int check_result)
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+			   struct mlx5e_xdp_info *xdpi, int check_result)
 {
 	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -293,7 +294,7 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
 	return true;
 }

-static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
 {
 	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
 		/* SQ is full, ring doorbell */
@@ -305,10 +306,9 @@ static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
 	return MLX5E_XDP_CHECK_OK;
 }

-static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
-				 struct mlx5e_xdp_xmit_data *xdptxd,
-				 struct mlx5e_xdp_info *xdpi,
-				 int check_result)
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+		     struct mlx5e_xdp_info *xdpi, int check_result)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -506,6 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		struct xdp_frame *xdpf = frames[i];
 		struct mlx5e_xdp_xmit_data xdptxd;
 		struct mlx5e_xdp_info xdpi;
+		bool ret;

 		xdptxd.data = xdpf->data;
 		xdptxd.len = xdpf->len;
@@ -522,7 +523,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		xdpi.frame.xdpf = xdpf;
 		xdpi.frame.dma_addr = xdptxd.dma_addr;

-		if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) {
+		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+		if (unlikely(!ret)) {
 			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
 					 xdptxd.len, DMA_TO_DEVICE);
 			xdp_return_frame_rx_napi(xdpf);
......
@@ -32,6 +32,8 @@
 #ifndef __MLX5_EN_XDP_H__
 #define __MLX5_EN_XDP_H__

+#include <linux/indirect_call_wrapper.h>
+
 #include "en.h"
 #include "en/txrx.h"
@@ -70,6 +72,17 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		   u32 flags);

+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
+							   struct mlx5e_xdp_xmit_data *xdptxd,
+							   struct mlx5e_xdp_info *xdpi,
+							   int check_result));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
+						    struct mlx5e_xdp_xmit_data *xdptxd,
+						    struct mlx5e_xdp_info *xdpi,
+						    int check_result));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
+
 static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
 {
 	set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
......
@@ -6,6 +6,7 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <net/xdp_sock_drv.h>
+#include <linux/indirect_call_wrapper.h>

 int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 {
@@ -75,8 +76,12 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 	xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;

 	for (; budget; budget--) {
-		int check_result = sq->xmit_xdp_frame_check(sq);
+		int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,
+						   mlx5e_xmit_xdp_frame_check_mpwqe,
+						   mlx5e_xmit_xdp_frame_check,
+						   sq);
 		struct xdp_desc desc;
+		bool ret;

 		if (unlikely(check_result < 0)) {
 			work_done = false;
@@ -98,7 +103,9 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 		xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);

-		if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
+		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+		if (unlikely(!ret)) {
 			if (sq->mpwqe.wqe)
 				mlx5e_xdp_mpwqe_complete(sq);
......
@@ -148,16 +148,6 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
 	return true;
 }

-static inline int mlx5e_accel_sk_get_rxq(struct sock *sk)
-{
-	int rxq = sk_rx_queue_get(sk);
-
-	if (unlikely(rxq == -1))
-		rxq = 0;
-
-	return rxq;
-}
-
 static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
 {
 	return mlx5e_ktls_init_rx(priv);
......
@@ -40,7 +40,7 @@
 #include "en.h"
 #include "en_accel/ipsec.h"
 #include "en_accel/ipsec_rxtx.h"
+#include "en_accel/ipsec_fs.h"

 static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 {
@@ -111,7 +111,7 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct xfrm_replay_state_esn *replay_esn;
-	u32 seq_bottom;
+	u32 seq_bottom = 0;
 	u8 overlap;
 	u32 *esn;
@@ -121,7 +121,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	}

 	replay_esn = sa_entry->x->replay_esn;
-	seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
+	if (replay_esn->seq >= replay_esn->replay_window)
+		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
+
 	overlap = sa_entry->esn_state.overlap;

 	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
@@ -282,6 +284,27 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 	return 0;
 }

+static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+	if (!mlx5_is_ipsec_device(priv->mdev))
+		return 0;
+
+	return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
+					     sa_entry->ipsec_obj_id,
+					     &sa_entry->ipsec_rule);
+}
+
+static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+	if (!mlx5_is_ipsec_device(priv->mdev))
+		return;
+
+	mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
+				      &sa_entry->ipsec_rule);
+}
+
 static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
@@ -329,10 +352,15 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 		goto err_xfrm;
 	}

+	sa_entry->ipsec_obj_id = sa_handle;
+	err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
+	if (err)
+		goto err_hw_ctx;
+
 	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
 		err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
 		if (err)
-			goto err_hw_ctx;
+			goto err_add_rule;
 	} else {
 		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
 				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
@@ -341,8 +369,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 	x->xso.offload_handle = (unsigned long)sa_entry;
 	goto out;

+err_add_rule:
+	mlx5e_xfrm_fs_del_rule(priv, sa_entry);
 err_hw_ctx:
-	mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
+	mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
 err_xfrm:
 	mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
 err_sa_entry:
@@ -366,13 +396,15 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);

 	if (!sa_entry)
 		return;

 	if (sa_entry->hw_context) {
 		flush_workqueue(sa_entry->ipsec->wq);
-		mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
+		mlx5e_xfrm_fs_del_rule(priv, sa_entry);
+		mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
 		mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
 	}
@@ -405,6 +437,8 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
 		kfree(ipsec);
 		return -ENOMEM;
 	}
+
+	mlx5e_accel_ipsec_fs_init(priv);
 	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
 	return 0;
 }
@@ -416,6 +450,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
 	if (!ipsec)
 		return;

+	mlx5e_accel_ipsec_fs_cleanup(priv);
 	destroy_workqueue(ipsec->wq);
 	ida_destroy(&ipsec->halloc);
......
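The esn_state hunk above also fixes an unsigned underflow: seq_bottom used to
be computed unconditionally, so with, say, replay_esn->seq == 10 and a
replay_window of 32, the u32 subtraction wrapped around and a bogus bottom of
the window was fed to xfrm_replay_seqhi(). The fixed logic in isolation:

	u32 seq_bottom = 0;

	/* only advance the window bottom once a full window has been seen */
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;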
@@ -75,6 +75,8 @@ struct mlx5e_ipsec_stats {
 	u64 ipsec_cmd_drop;
 };

+struct mlx5e_accel_fs_esp;
+
 struct mlx5e_ipsec {
 	struct mlx5e_priv *en_priv;
 	DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
@@ -84,6 +86,7 @@ struct mlx5e_ipsec {
 	struct mlx5e_ipsec_sw_stats sw_stats;
 	struct mlx5e_ipsec_stats stats;
 	struct workqueue_struct *wq;
+	struct mlx5e_accel_fs_esp *rx_fs;
 };

 struct mlx5e_ipsec_esn_state {
@@ -92,6 +95,11 @@ struct mlx5e_ipsec_esn_state {
 	u8 overlap: 1;
 };

+struct mlx5e_ipsec_rule {
+	struct mlx5_flow_handle *rule;
+	struct mlx5_modify_hdr *set_modify_hdr;
+};
+
 struct mlx5e_ipsec_sa_entry {
 	struct hlist_node hlist; /* Item in SADB_RX hashtable */
 	struct mlx5e_ipsec_esn_state esn_state;
@@ -102,6 +110,8 @@ struct mlx5e_ipsec_sa_entry {
 	void *hw_context;
 	void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
 			  struct xfrm_offload *xo);
+	u32 ipsec_obj_id;
+	struct mlx5e_ipsec_rule ipsec_rule;
 };

 void mlx5e_ipsec_build_inverse_table(void);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include "accel/ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"
#define NUM_IPSEC_FTE BIT(15)
enum accel_fs_esp_type {
ACCEL_FS_ESP4,
ACCEL_FS_ESP6,
ACCEL_FS_ESP_NUM_TYPES,
};
struct mlx5e_ipsec_rx_err {
struct mlx5_flow_table *ft;
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *copy_modify_hdr;
};
struct mlx5e_accel_fs_esp_prot {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_destination default_dest;
struct mlx5e_ipsec_rx_err rx_err;
u32 refcnt;
struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};
struct mlx5e_accel_fs_esp {
struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};
/* IPsec RX flow steering */
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
if (i == ACCEL_FS_ESP4)
return MLX5E_TT_IPV4_IPSEC_ESP;
return MLX5E_TT_IPV6_IPSEC_ESP;
}
static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot,
struct mlx5e_ipsec_rx_err *rx_err)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_spec *spec;
int err = 0;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
/* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
MLX5_SET(copy_action_in, action, dst_offset, 0);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
netdev_err(priv->netdev,
"fail to alloc ipsec copy modify_header_id err=%d\n", err);
goto out_spec;
}
/* create fte */
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_act.modify_hdr = modify_hdr;
fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
&fs_prot->default_dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
goto out;
}
rx_err->rule = fte;
rx_err->copy_modify_hdr = modify_hdr;
out:
if (err)
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
kfree(spec);
return err;
}
static void rx_err_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_rx_err *rx_err)
{
if (rx_err->rule) {
mlx5_del_flow_rules(rx_err->rule);
rx_err->rule = NULL;
}
if (rx_err->copy_modify_hdr) {
mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
rx_err->copy_modify_hdr = NULL;
}
}
static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
{
rx_err_del_rule(priv, rx_err);
if (rx_err->ft) {
mlx5_destroy_flow_table(rx_err->ft);
rx_err->ft = NULL;
}
}
static int rx_err_create_ft(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot,
struct mlx5e_ipsec_rx_err *rx_err)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
int err;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
return err;
}
rx_err->ft = ft;
err = rx_err_add_rule(priv, fs_prot, rx_err);
if (err)
goto out_err;
return 0;
out_err:
mlx5_destroy_flow_table(ft);
rx_err->ft = NULL;
return err;
}
static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
{
if (fs_prot->miss_rule) {
mlx5_del_flow_rules(fs_prot->miss_rule);
fs_prot->miss_rule = NULL;
}
if (fs_prot->miss_group) {
mlx5_destroy_flow_group(fs_prot->miss_group);
fs_prot->miss_group = NULL;
}
if (fs_prot->ft) {
mlx5_destroy_flow_table(fs_prot->ft);
fs_prot->ft = NULL;
}
}
static int rx_fs_create(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
u32 *flow_group_in;
int err = 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!flow_group_in || !spec) {
err = -ENOMEM;
goto out;
}
/* Create FT */
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
goto out;
}
fs_prot->ft = ft;
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
miss_group = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(miss_group)) {
err = PTR_ERR(miss_group);
netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
goto out;
}
fs_prot->miss_group = miss_group;
/* Create miss rule */
miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule);
netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
goto out;
}
fs_prot->miss_rule = miss_rule;
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
accel_esp = priv->ipsec->rx_fs;
/* The netdev unreg already happened, so all offloaded rules are already removed */
fs_prot = &accel_esp->fs_prot[type];
rx_fs_destroy(fs_prot);
rx_err_destroy_ft(priv, &fs_prot->rx_err);
return 0;
}
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
int err;
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type));
err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
if (err)
return err;
err = rx_fs_create(priv, fs_prot);
if (err)
rx_destroy(priv, type);
return err;
}
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
int err = 0;
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
if (fs_prot->refcnt++)
goto out;
/* create FT */
err = rx_create(priv, type);
if (err) {
fs_prot->refcnt--;
goto out;
}
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest);
out:
mutex_unlock(&fs_prot->prot_mutex);
return err;
}
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
if (--fs_prot->refcnt)
goto out;
/* disconnect */
mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
out:
mutex_unlock(&fs_prot->prot_mutex);
}
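/* Illustration only: rx_ft_get()/rx_ft_put() implement a per-protocol
 * refcount. The first get builds the tables and redirects the TTC ESP entry
 * at them; the last put restores the default destination and tears them
 * down. A minimal sketch of the expected pairing (the real caller is
 * rx_add_rule() below):
 */
static int example_rx_ft_usage(struct mlx5e_priv *priv)
{
	int err = rx_ft_get(priv, ACCEL_FS_ESP4);	/* refcnt 0 -> 1: creates FTs */

	if (err)
		return err;
	/* ... install per-SA decrypt rules into fs_prot->ft here ... */
	rx_ft_put(priv, ACCEL_FS_ESP4);			/* refcnt 1 -> 0: destroys FTs */
	return 0;
}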
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act)
{
u8 ip_version = attrs->is_ipv6 ? 6 : 4;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
/* ip_version */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
/* Non fragmented */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
/* ESP header */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
/* SPI number */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
be32_to_cpu(attrs->spi));
if (ip_version == 4) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
&attrs->saddr.a4, 4);
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&attrs->daddr.a4, 4);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
} else {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
&attrs->saddr.a6, 16);
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&attrs->daddr.a6, 16);
memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff, 16);
memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff, 16);
}
flow_act->ipsec_obj_id = ipsec_obj_id;
flow_act->flags |= FLOW_ACT_NO_APPEND;
}
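/* Illustration only: for an IPv4 SA, the spec built above makes the hardware
 * match the same five fields a software classifier would check. A hedged
 * equivalent predicate (example_matches_esp4_sa() is not part of the driver):
 */
static bool example_matches_esp4_sa(const struct iphdr *iph, __be32 spi,
				    const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	return iph->version == 4 &&
	       !(iph->frag_off & htons(IP_MF | IP_OFFSET)) &&	/* non-fragmented */
	       iph->protocol == IPPROTO_ESP &&
	       spi == attrs->spi &&
	       iph->saddr == attrs->saddr.a4 &&
	       iph->daddr == attrs->daddr.a4;
}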
static int rx_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_modify_hdr *modify_hdr = NULL;
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
enum accel_fs_esp_type type;
struct mlx5_flow_spec *spec;
int err = 0;
accel_esp = priv->ipsec->rx_fs;
type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
fs_prot = &accel_esp->fs_prot[type];
err = rx_ft_get(priv, type);
if (err)
return err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out_err;
}
setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
/* Set 1 bit ipsec marker (regB bit 7) and 24 bit ipsec_obj_id (regB bits
 * 8-31) in a single set action: offset 7, length 25, data (ipsec_obj_id << 1) | 1.
 */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
MLX5_SET(set_action_in, action, offset, 7);
MLX5_SET(set_action_in, action, length, 25);
modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
netdev_err(priv->netdev,
"fail to alloc ipsec set modify_header_id err=%d\n", err);
modify_hdr = NULL;
goto out_err;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
flow_act.modify_hdr = modify_hdr;
dest.ft = fs_prot->rx_err.ft;
rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
attrs->action, err);
goto out_err;
}
ipsec_rule->rule = rule;
ipsec_rule->set_modify_hdr = modify_hdr;
goto out;
out_err:
if (modify_hdr)
mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
rx_ft_put(priv, type);
out:
kvfree(spec);
return err;
}
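/* Illustration only: together with the rx_err copy action, the set action
 * above defines the metadata register B layout the RX datapath decodes:
 * bit 7 is the IPsec marker, bits 0-6 carry the syndrome, and bits 8-31
 * carry the 24-bit object id. A sketch of the resulting value:
 */
static inline u32 example_reg_b_layout(u32 ipsec_obj_id, u8 ipsec_syndrome)
{
	/* set action: offset 7, length 25, data (ipsec_obj_id << 1) | 1 */
	u32 reg_b = ((ipsec_obj_id << 1) | 0x1) << 7;

	/* rx_err copy action: ipsec_syndrome into bits 0-6 */
	return reg_b | (ipsec_syndrome & 0x7F);
}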
static void rx_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
{
mlx5_del_flow_rules(ipsec_rule->rule);
ipsec_rule->rule = NULL;
mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
ipsec_rule->set_modify_hdr = NULL;
rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
{
if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
return -EOPNOTSUPP;
return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
{
if (!priv->ipsec->rx_fs)
return;
rx_del_rule(priv, attrs, ipsec_rule);
}
static void fs_cleanup_rx(struct mlx5e_priv *priv)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
enum accel_fs_esp_type i;
accel_esp = priv->ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_destroy(&fs_prot->prot_mutex);
WARN_ON(fs_prot->refcnt);
}
kfree(priv->ipsec->rx_fs);
priv->ipsec->rx_fs = NULL;
}
static int fs_init_rx(struct mlx5e_priv *priv)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
enum accel_fs_esp_type i;
priv->ipsec->rx_fs =
kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
if (!priv->ipsec->rx_fs)
return -ENOMEM;
accel_esp = priv->ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_init(&fs_prot->prot_mutex);
}
return 0;
}
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
{
if (!priv->ipsec->rx_fs)
return;
fs_cleanup_rx(priv);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
return -EOPNOTSUPP;
return fs_init_rx(priv);
}
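/* Illustration only: the intended call order for this file's API, assuming a
 * hypothetical caller wired into the IPsec state add/delete path. Names other
 * than the mlx5e_accel_ipsec_fs_* functions are made up.
 */
static int example_ipsec_fs_lifecycle(struct mlx5e_priv *priv,
				      struct mlx5_accel_esp_xfrm_attrs *attrs,
				      u32 ipsec_obj_id)
{
	struct mlx5e_ipsec_rule rule = {};
	int err;

	err = mlx5e_accel_ipsec_fs_init(priv);		/* once, at IPsec init */
	if (err)
		return err;
	err = mlx5e_accel_ipsec_fs_add_rule(priv, attrs, ipsec_obj_id, &rule);
	if (err)
		goto out;
	/* ... matching RX ESP traffic is now decrypted and marked in regB ... */
	mlx5e_accel_ipsec_fs_del_rule(priv, attrs, &rule);
out:
	mlx5e_accel_ipsec_fs_cleanup(priv);		/* once, at IPsec cleanup */
	return err;
}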
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_STEERING_H__
#define __MLX5_IPSEC_STEERING_H__
#include "en.h"
#include "ipsec.h"
#include "accel/ipsec_offload.h"
#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
#else
static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */
...@@ -360,6 +360,62 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return skb;
}
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
priv = netdev_priv(netdev);
sp = secpath_set(skb);
if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return;
}
xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
if (unlikely(!xs)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return;
}
sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features)
{
...
...@@ -34,13 +34,17 @@
#ifndef __MLX5E_IPSEC_RXTX_H__
#define __MLX5E_IPSEC_RXTX_H__
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
#include "en/txrx.h"
#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
#ifdef CONFIG_MLX5_EN_IPSEC
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
...@@ -55,7 +59,21 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
struct mlx5_wqe_eth_seg *eseg,
struct sk_buff *skb);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
}
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
...@@ -547,6 +547,16 @@ void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
queue_work(rule->priv->tls->rx_wq, &rule->work);
}
static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
int rxq = sk_rx_queue_get(sk);
if (unlikely(rxq == -1))
rxq = 0;
return rxq;
}
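/* Illustration only: the helper guards against sockets that have not yet
 * been classified to an RX queue; sk_rx_queue_get() returns -1 until RX
 * traffic has been recorded for the socket, and queue 0 is a safe default.
 * A usage sketch mirroring the call site in the diff below:
 */
static void example_pick_rxq(struct mlx5e_ktls_offload_context_rx *priv_rx,
			     struct sock *sk)
{
	priv_rx->rxq = mlx5e_ktls_sk_get_rxq(sk);	/* 0 if not yet classified */
}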
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn)
...@@ -573,7 +583,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
rxq = mlx5e_accel_sk_get_rxq(sk); rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
priv_rx->sk = sk;
...
...@@ -65,6 +65,7 @@
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
...@@ -231,7 +232,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
cseg->umr_mkey = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
...@@ -496,7 +496,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if (c->priv->ipsec) if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
c->priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else
#endif
...
...@@ -1196,18 +1196,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
if (rep->vport == MLX5_VPORT_UPLINK) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pfnum;
memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
if (rep->vport == MLX5_VPORT_UPLINK)
devlink_port_attrs_set(&rpriv->dl_port, &attrs);
else if (rep->vport == MLX5_VPORT_PF) } else if (rep->vport == MLX5_VPORT_PF) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum);
else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
pfnum, rep->vport - 1);
}
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
...
...@@ -973,9 +973,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) u8 ipproto = get_ip_proto(skb, network_depth, proto);
if (unlikely(ipproto == IPPROTO_SCTP))
goto csum_unnecessary;
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
goto csum_none;
stats->csum_complete++;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
...@@ -1021,6 +1026,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
...@@ -1258,7 +1266,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
...
...@@ -4401,7 +4401,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack); err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
&flow->esw_attr->ct_attr, extack);
if (err)
goto err_free;
...
...@@ -1652,7 +1652,17 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return 0;
mutex_lock(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
if (!ret)
esw->esw_funcs.num_vfs = num_vfs;
}
mutex_unlock(&esw->mode_lock);
return ret;
}
...@@ -1699,6 +1709,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
mutex_lock(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw, clear_vf);
esw->esw_funcs.num_vfs = 0;
mutex_unlock(&esw->mode_lock);
}
...
...@@ -513,16 +513,9 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
/* Ideally device should have the functions changed supported
* capability regardless of it being ECPF or PF wherever such
* event should be processed such as on eswitch manager device.
* However, some ECPF based device might not have this capability
* set. Hence OR for ECPF check to cover such device.
*/
return MLX5_CAP_ESW(dev, esw_functions_changed) ||
mlx5_core_is_ecpf_esw_manager(dev);
return mlx5_core_is_ecpf_esw_manager(dev);
}
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
...
...@@ -1578,13 +1578,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
if (esw->mode != MLX5_ESWITCH_LEGACY &&
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_locked(esw, false);
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
esw->dev->priv.sriov.num_vfs);
...@@ -2293,7 +2286,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
{
u16 cur_mlx5_mode, mlx5_mode = 0;
struct mlx5_eswitch *esw;
int err; int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
...@@ -2303,12 +2296,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return -EINVAL;
mutex_lock(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
cur_mlx5_mode = esw->mode;
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
...
...@@ -359,7 +359,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
return ret;
}
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
...@@ -370,7 +370,7 @@ unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
number_of_ipsec_counters);
}
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
...@@ -665,12 +665,10 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
return true;
}
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4], const __be32 saddr[4], const __be32 daddr[4],
const __be32 daddr[4], const __be32 spi, bool is_ipv6, u32 *sa_handle)
const __be32 spi, bool is_ipv6,
u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
...@@ -862,7 +860,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
mutex_unlock(&fipsec->sa_hash_lock);
}
void mlx5_fpga_ipsec_delete_sa_ctx(void *context) static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
...@@ -1264,7 +1262,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flo
}
}
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_conn_attr init_attr = {0};
struct mlx5_fpga_device *fdev = mdev->fpga;
...@@ -1346,7 +1344,7 @@ static void destroy_rules_rb(struct rb_root *root)
}
}
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
...@@ -1451,7 +1449,7 @@ mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
return 0;
}
struct mlx5_accel_esp_xfrm * static struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
...@@ -1479,7 +1477,7 @@ mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
return &fpga_xfrm->accel_xfrm;
}
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(xfrm, struct mlx5_fpga_esp_xfrm,
...@@ -1488,7 +1486,7 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
kfree(fpga_xfrm);
}
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = xfrm->mdev;
...@@ -1560,3 +1558,24 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
mutex_unlock(&fpga_xfrm->lock);
return err;
}
static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
.device_caps = mlx5_fpga_ipsec_device_caps,
.counters_count = mlx5_fpga_ipsec_counters_count,
.counters_read = mlx5_fpga_ipsec_counters_read,
.create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
.free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
.init = mlx5_fpga_ipsec_init,
.cleanup = mlx5_fpga_ipsec_cleanup,
.esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
.esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
.esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{
if (!mlx5_fpga_is_ipsec_device(mdev))
return NULL;
return &fpga_ipsec_ops;
}
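/* Illustration only: with every FPGA entry point now static, the ops table
 * above is the only way in, and the generic accel layer is expected to pick
 * an implementation and dispatch through it. A hedged sketch of such a
 * dispatcher, assuming the mdev->ipsec_ops pointer added to struct
 * mlx5_core_dev later in this series (example_accel_device_caps() is made up):
 */
static u32 example_accel_device_caps(struct mlx5_core_dev *mdev)
{
	if (!mdev->ipsec_ops || !mdev->ipsec_ops->device_caps)
		return 0;
	return mdev->ipsec_ops->device_caps(mdev);
}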
...@@ -38,44 +38,23 @@
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count);
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6,
u32 *sa_handle);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_build_fs_cmds(void);
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
#else
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) static inline
{ const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
return 0; { return NULL; }
} static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */
...@@ -459,6 +459,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
...
...@@ -105,7 +105,7 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {aRFS/accel} */ /* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 6
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
...
...@@ -250,6 +250,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, ipsec_offload)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
if (err)
return err;
}
return 0;
}
...
...@@ -6,7 +6,7 @@
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
u32 *p_key_id) u32 key_type, u32 *p_key_id)
{
u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
...@@ -41,8 +41,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
memcpy(key_p, key, sz_bytes);
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type, MLX5_SET(encryption_key_obj, obj, key_type, key_type);
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
...
...@@ -80,8 +80,14 @@ void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
/* Crypto */
enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
};
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id); void *key, u32 sz_bytes,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
...
...@@ -1089,11 +1089,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
err = mlx5_accel_ipsec_init(dev); mlx5_accel_ipsec_init(dev);
if (err) {
mlx5_core_err(dev, "IPSec device start failed %d\n", err);
goto err_ipsec_start;
}
err = mlx5_accel_tls_init(dev);
if (err) {
...@@ -1135,7 +1131,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_accel_tls_cleanup(dev);
err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
...@@ -1628,7 +1623,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
mlx5_accel_ipsec_build_fs_cmds(); mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
...
...@@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
#ifdef CONFIG_MLX5_FPGA_IPSEC #ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
...@@ -152,5 +152,5 @@ static inline int
mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
#endif #endif /* CONFIG_MLX5_ACCEL */
#endif #endif /* __MLX5_ACCEL_H__ */
...@@ -707,6 +707,9 @@ struct mlx5_core_dev {
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
...
...@@ -207,7 +207,10 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
union {
u32 ipsec_obj_id;
uintptr_t esp_id;
};
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
...
...@@ -416,7 +416,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reformat_and_fwd_to_table[0x1];
u8 reserved_at_1a[0x6]; u8 reserved_at_1a[0x2];
u8 ipsec_encrypt[0x1];
u8 ipsec_decrypt[0x1];
u8 reserved_at_1e[0x2];
u8 termination_table_raw_traffic[0x1];
u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
...@@ -2965,6 +2969,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
};
enum {
...@@ -3006,7 +3012,8 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
u8 reserved_at_120[0xe0]; u8 ipsec_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
...@@ -5752,6 +5759,7 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
...