Commit 2fa33b35 authored by Leon Romanovsky

net/mlx5_fpga: Drop INNOVA IPsec support

Mellanox INNOVA IPsec cards reached EOL in November 2019 [1]. As such,
the code is unmaintained, untested, and not in use by any
upstream/distro-oriented customers. In order to reduce code complexity,
drop the kernel code.

[1] https://network.nvidia.com/related-docs/eol/LCR-000535.pdf

Link: https://lore.kernel.org/r/2afe88ec5020a491079eacf6fe3c89b64d65195c.1649232994.git.leonro@nvidia.com
Reviewed-by: Raed Salem <raeds@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent 0276bd3a
@@ -22,7 +22,6 @@ config MLX5_ACCEL
 config MLX5_FPGA
 	bool "Mellanox Technologies Innova support"
 	depends on MLX5_CORE
-	select MLX5_ACCEL
 	help
 	  Build support for the Innova family of network cards by Mellanox
 	  Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,17 +142,6 @@ config MLX5_CORE_IPOIB
 	help
 	  MLX5 IPoIB offloads & acceleration support.
 
-config MLX5_FPGA_IPSEC
-	bool "Mellanox Technologies IPsec Innova support"
-	depends on MLX5_CORE
-	depends on MLX5_FPGA
-	help
-	  Build IPsec support for the Innova family of network cards by Mellanox
-	  Technologies. Innova network cards are comprised of a ConnectX chip
-	  and an FPGA chip on one board. If you select this option, the
-	  mlx5_core driver will include the Innova FPGA core and allow building
-	  sandbox-specific client drivers.
-
 config MLX5_IPSEC
 	bool "Mellanox Technologies IPsec Connect-X support"
 	depends on MLX5_CORE_EN
@@ -171,7 +159,7 @@ config MLX5_EN_IPSEC
 	depends on MLX5_CORE_EN
 	depends on XFRM_OFFLOAD
 	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-	depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
+	depends on MLX5_IPSEC
 	help
 	  Build support for IPsec cryptography-offload acceleration in the NIC.
 	  Note: Support for hardware with this capability needs to be selected
......
@@ -89,7 +89,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
 # Accelerations & FPGA
 #
 mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
 mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/ipsec.o
 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
......
@@ -35,7 +35,6 @@
 #include "accel/ipsec.h"
 #include "mlx5_core.h"
-#include "fpga/ipsec.h"
 #include "accel/ipsec_offload.h"
 
 void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
@@ -43,10 +42,7 @@ void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
 	const struct mlx5_accel_ipsec_ops *ipsec_ops;
 	int err = 0;
 
-	ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
-		    mlx5_ipsec_offload_ops(mdev) :
-		    mlx5_fpga_ipsec_ops(mdev);
+	ipsec_ops = mlx5_ipsec_offload_ops(mdev);
 	if (!ipsec_ops || !ipsec_ops->init) {
 		mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
 		return;
......
@@ -6,7 +6,6 @@
 #include "en/port.h"
 #include "en_accel/en_accel.h"
 #include "accel/ipsec.h"
-#include "fpga/ipsec.h"
 
 static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
 			    struct mlx5e_xsk_param *xsk)
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
 		return false;
 
-	if (mlx5_fpga_is_ipsec_device(mdev))
-		return false;
-
 	if (params->xdp_prog) {
 		/* XSK params are not considered here. If striding RQ is in use,
 		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	int max_mtu;
 	int i;
 
-	if (mlx5_fpga_is_ipsec_device(mdev))
-		byte_count += MLX5E_METADATA_ETHER_LEN;
-
 	if (mlx5e_rx_is_linear_skb(params, xsk)) {
 		int frag_stride;
......
@@ -38,7 +38,6 @@
 #include "accel/ipsec.h"
 #include "fpga/sdk.h"
 #include "en_accel/ipsec.h"
-#include "fpga/ipsec.h"
 
 static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
@@ -105,7 +104,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
 {
-	return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+	return 0;
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
@@ -121,25 +120,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
 {
-	unsigned int i;
-
-	if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
-		for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
-			strcpy(data + (idx++) * ETH_GSTRING_LEN,
-			       mlx5e_ipsec_hw_stats_desc[i].format);
-
 	return idx;
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
 {
-	int i;
-
-	if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
-		for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
-			data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
-							   mlx5e_ipsec_hw_stats_desc,
-							   i);
-
 	return idx;
 }
......
@@ -67,7 +67,6 @@
 #include "en/ptp.h"
 #include "qos.h"
 #include "en/trap.h"
-#include "fpga/ipsec.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -4467,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 		return -EINVAL;
 	}
 
-	if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
-		netdev_warn(netdev,
-			    "XDP is not available on Innova cards with IPsec support\n");
-		return -EINVAL;
-	}
-
 	new_params = priv->channels.params;
 	new_params.xdp_prog = prog;
......
@@ -49,7 +49,6 @@
 #include "en/rep/tc.h"
 #include "ipoib/ipoib.h"
 #include "accel/ipsec.h"
-#include "fpga/ipsec.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ktls_txrx.h"
 #include "en/xdp.h"
@@ -2384,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
 };
 #endif /* CONFIG_MLX5_CORE_IPOIB */
 
-#ifdef CONFIG_MLX5_EN_IPSEC
-
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
-	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
-	struct mlx5e_wqe_frag_info *wi;
-	struct sk_buff *skb;
-	u32 cqe_bcnt;
-	u16 ci;
-
-	ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
-	wi = get_frag(rq, ci);
-	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-
-	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
-		rq->stats->wqe_err++;
-		goto wq_free_wqe;
-	}
-
-	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
-			      mlx5e_skb_from_cqe_linear,
-			      mlx5e_skb_from_cqe_nonlinear,
-			      rq, cqe, wi, cqe_bcnt);
-	if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
-		goto wq_free_wqe;
-
-	skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
-	if (unlikely(!skb))
-		goto wq_free_wqe;
-
-	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-	napi_gro_receive(rq->cq.napi, skb);
-
-wq_free_wqe:
-	mlx5e_free_rx_wqe(rq, wi, true);
-	mlx5_wq_cyc_pop(wq);
-}
-
-#endif /* CONFIG_MLX5_EN_IPSEC */
-
 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
 {
 	struct net_device *netdev = rq->netdev;
@@ -2440,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
 		rq->post_wqes = mlx5e_post_rx_mpwqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
-		if (mlx5_fpga_is_ipsec_device(mdev)) {
-			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
-			return -EINVAL;
-		}
-
 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
 			if (!rq->handle_rx_cqe) {
@@ -2467,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
 					mlx5e_skb_from_cqe_nonlinear;
 		rq->post_wqes = mlx5e_post_rx_wqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
-		if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
-		    priv->ipsec)
-			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
-		else
-#endif
-			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
+		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
 		if (!rq->handle_rx_cqe) {
 			netdev_err(netdev, "RX handler of RQ is not set\n");
 			return -EINVAL;
......
@@ -57,8 +57,6 @@ struct mlx5_fpga_device {
 		u32 mkey;
 		struct mlx5_uars_page *uar;
 	} conn_res;
-
-	struct mlx5_fpga_ipsec *ipsec;
 };
 
 #define mlx5_fpga_dbg(__adev, format, ...) \
......
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_PENDING,
MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
MLX5_FPGA_IPSEC_CMD_COMPLETE,
};
struct mlx5_fpga_ipsec_cmd_context {
struct mlx5_fpga_dma_buf buf;
enum mlx5_fpga_ipsec_cmd_status status;
struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
int status_code;
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
u8 command[];
};
struct mlx5_fpga_esp_xfrm;
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa;
u32 sa_handle;
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
struct mlx5_fpga_esp_xfrm {
unsigned int num_rules;
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mutex lock; /* xfrm lock */
struct mlx5_accel_esp_xfrm accel_xfrm;
};
struct mlx5_fpga_ipsec_rule {
struct rb_node node;
struct fs_fte *fte;
struct mlx5_fpga_ipsec_sa_ctx *ctx;
};
static const struct rhashtable_params rhash_sa = {
/* Keep out "cmd" field from the key as it's
* value is not constant during the lifetime
* of the key object.
*/
.key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
};
struct mlx5_fpga_ipsec {
struct mlx5_fpga_device *fdev;
struct list_head pending_cmds;
spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
struct mlx5_fpga_conn *conn;
struct notifier_block fs_notifier_ingress_bypass;
struct notifier_block fs_notifier_egress;
	/* Map hardware SA           -->  SA context
	 *     (mlx5_fpga_ipsec_sa)      (mlx5_fpga_ipsec_sa_ctx)
	 * We use this hash to reject duplicate SAs in the FPGA, which
	 * are not allowed.
	 */
struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
struct mutex sa_hash_lock;
/* Tree holding all rules for this fpga device
* Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
*/
struct rb_root rules_rb;
struct mutex rules_rb_lock; /* rules lock */
struct ida halloc;
};
bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
return false;
if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
return false;
if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
return false;
return true;
}
static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *buf,
u8 status)
{
struct mlx5_fpga_ipsec_cmd_context *context;
if (status) {
context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
buf);
mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
status);
context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
complete(&context->complete);
}
}
static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
switch (syndrome) {
case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
return 0;
case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
return -EEXIST;
case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
return -EINVAL;
case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
return -EIO;
}
return -EIO;
}
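/* RX callback for the FPGA command connection. Responses arrive in
 * submission order, so each response is matched to the oldest entry on
 * the pending_cmds list, whose waiter is then completed.
 */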
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
struct mlx5_fpga_ipsec_cmd_context *context;
enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
struct mlx5_fpga_device *fdev = cb_arg;
unsigned long flags;
if (buf->sg[0].size < sizeof(*resp)) {
mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
buf->sg[0].size, sizeof(*resp));
return;
}
mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
ntohl(resp->syndrome));
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
struct mlx5_fpga_ipsec_cmd_context,
list);
if (context)
list_del(&context->list);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
if (!context) {
mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
return;
}
mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
syndrome = ntohl(resp->syndrome);
context->status_code = syndrome_to_errno(syndrome);
context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
memcpy(&context->resp, resp, sizeof(*resp));
if (context->status_code)
mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
syndrome);
complete(&context->complete);
}
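/* Allocate a command context, queue it on pending_cmds and send it to
 * the device over the SBU connection. The caller waits on the returned
 * context with mlx5_fpga_ipsec_cmd_wait() and then frees it.
 */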
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
const void *cmd, int cmd_size)
{
struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags;
int res;
if (!fdev || !fdev->ipsec)
return ERR_PTR(-EOPNOTSUPP);
if (cmd_size & 3)
return ERR_PTR(-EINVAL);
context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
if (!context)
return ERR_PTR(-ENOMEM);
context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
context->dev = fdev;
context->buf.complete = mlx5_fpga_ipsec_send_complete;
init_completion(&context->complete);
memcpy(&context->command, cmd, cmd_size);
context->buf.sg[0].size = cmd_size;
context->buf.sg[0].data = &context->command;
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (!res)
list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
if (res) {
mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
kfree(context);
return ERR_PTR(res);
}
/* Context should be freed by the caller after completion. */
return context;
}
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout =
msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
int res;
res = wait_for_completion_timeout(&context->complete, timeout);
if (!res) {
mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
return -ETIMEDOUT;
}
if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
res = context->status_code;
else
res = -EIO;
return res;
}
static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
return true;
return false;
}
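/* Push an ADD/DEL/MOD SA command to the FPGA SADB. The command size
 * depends on whether the v2 SADB format is supported, and the response
 * must echo back the sw_sa_handle that was sent.
 */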
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
int opcode)
{
struct mlx5_core_dev *dev = fdev->mdev;
struct mlx5_ifc_fpga_ipsec_sa *sa;
struct mlx5_fpga_ipsec_cmd_context *cmd_context;
size_t sa_cmd_size;
int err;
hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
if (is_v2_sadb_supported(fdev->ipsec))
sa_cmd_size = sizeof(*hw_sa);
else
sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
if (IS_ERR(cmd_context))
return PTR_ERR(cmd_context);
err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
if (err)
goto out;
sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
ntohl(sa->ipsec_sa_v1.sw_sa_handle),
ntohl(cmd_context->resp.sw_sa_handle));
err = -EIO;
}
out:
kfree(cmd_context);
return err;
}
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
u32 ret = 0;
if (mlx5_fpga_is_ipsec_device(mdev)) {
ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
} else {
return ret;
}
if (!fdev->ipsec)
return ret;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
return ret;
}
static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
if (!fdev || !fdev->ipsec)
return 0;
return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
number_of_ipsec_counters);
}
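/* Read the IPsec counters out of FPGA memory. The base address comes
 * from the extended capabilities; each counter is a pair of big-endian
 * 32-bit words, low word first.
 */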
static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned int i;
__be32 *data;
u32 count;
u64 addr;
int ret;
if (!fdev || !fdev->ipsec)
return 0;
addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
ipsec_counters_addr_low) +
((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
ipsec_counters_addr_high) << 32);
count = mlx5_fpga_ipsec_counters_count(mdev);
data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto out;
}
ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
MLX5_FPGA_ACCESS_TYPE_DONTCARE);
if (ret < 0) {
mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
ret);
goto out;
}
ret = 0;
if (count > counters_count)
count = counters_count;
/* Each counter is low word, then high. But each word is big-endian */
for (i = 0; i < count; i++)
counters[i] = (u64)ntohl(data[i * 2]) |
((u64)ntohl(data[i * 2 + 1]) << 32);
out:
kfree(data);
return ret;
}
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
int err;
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
if (IS_ERR(context))
return PTR_ERR(context);
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
goto out;
if ((context->resp.flags & cmd.flags) != cmd.flags) {
mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
cmd.flags,
context->resp.flags);
err = -EIO;
}
out:
kfree(context);
return err;
}
static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
u32 flags = 0;
if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
return mlx5_fpga_ipsec_set_caps(mdev, flags);
}
static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
/* key */
memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
aes_gcm->key_len / 8);
/* Duplicate 128 bit key twice according to HW layout */
if (aes_gcm->key_len == 128)
memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
aes_gcm->aes_key, aes_gcm->key_len / 8);
/* salt and seq_iv */
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
sizeof(aes_gcm->seq_iv));
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
sizeof(aes_gcm->salt));
/* esn */
if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags |=
(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = htonl(xfrm_attrs->esn);
} else {
hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags &=
~(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = 0;
}
/* rx handle */
hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
/* enc mode */
switch (aes_gcm->key_len) {
case 128:
hw_sa->ipsec_sa_v1.enc_mode =
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
break;
case 256:
hw_sa->ipsec_sa_v1.enc_mode =
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
break;
}
/* flags */
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
MLX5_FPGA_IPSEC_SA_SPI_EN |
MLX5_FPGA_IPSEC_SA_IP_ESP;
if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
else
hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
/* IPs */
memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
/* SPI */
hw_sa->ipsec_sa_v1.spi = spi;
/* flags */
if (is_ipv6)
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
static bool is_full_mask(const void *p, size_t len)
{
WARN_ON(len % 4);
return !memchr_inv(p, 0xff, len);
}
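/* The FPGA resolves SAs by exact IPs and SPI, so only rules that match
 * on full source/destination address and SPI masks can be offloaded.
 */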
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
const u32 *match_c,
const u32 *match_v)
{
const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
match_c,
misc_parameters);
const void *headers_c = MLX5_ADDR_OF(fte_match_param,
match_c,
outer_headers);
const void *headers_v = MLX5_ADDR_OF(fte_match_param,
match_v,
outer_headers);
if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4);
const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
ipv4)) ||
!is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
ipv4)))
return false;
} else {
const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6);
const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6)) ||
!is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6)))
return false;
}
if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
outer_esp_spi),
MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
return false;
return true;
}
static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
u8 match_criteria_enable,
const u32 *match_c,
const u32 *match_v)
{
u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
bool ipv6_flow;
ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
mlx5_fs_is_vxlan_flow(match_c) ||
!(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
ipv6_flow))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
mlx5_fs_is_outer_ipsec_flow(match_c))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
ipv6_flow)
return false;
if (!validate_fpga_full_mask(dev, match_c, match_v))
return false;
return true;
}
static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
u8 match_criteria_enable,
const u32 *match_c,
const u32 *match_v,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_context *flow_context)
{
const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
int ret;
ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
match_v);
if (!ret)
return ret;
if (is_dmac || is_smac ||
(match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
(flow_context->flags & FLOW_CONTEXT_HAS_TAG))
return false;
return true;
}
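/* Create (or reuse) the hardware SA context for an accel_xfrm. All
 * rules bound to the same fpga_xfrm must share identical IPs and SPI,
 * the sa_hash rejects duplicate SAs across xfrms, and RX (decrypt) SAs
 * get their handle from the halloc IDA before the SA is programmed.
 */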
static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(accel_xfrm, typeof(*fpga_xfrm),
accel_xfrm);
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
int opcode, err;
void *context;
/* alloc SA */
sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
if (!sa_ctx)
return ERR_PTR(-ENOMEM);
sa_ctx->dev = mdev;
/* build candidate SA */
mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
saddr, daddr, spi, is_ipv6,
&sa_ctx->hw_sa);
mutex_lock(&fpga_xfrm->lock);
if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
/* all rules must be with same IPs and SPI */
if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
sizeof(sa_ctx->hw_sa))) {
context = ERR_PTR(-EINVAL);
goto exists;
}
++fpga_xfrm->num_rules;
context = fpga_xfrm->sa_ctx;
goto exists;
}
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
if (err < 0) {
context = ERR_PTR(err);
goto exists;
}
sa_ctx->sa_handle = err;
if (sa_handle)
*sa_handle = sa_ctx->sa_handle;
}
	/* This is an unbound fpga_xfrm, try to add it to the hash */
mutex_lock(&fipsec->sa_hash_lock);
err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa);
if (err) {
		/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx, as multiple keymats for the same IPs and SPI are
		 * not supported
		 */
context = ERR_PTR(-EEXIST);
goto unlock_hash;
}
	/* Bind accel_xfrm to sa_ctx */
opcode = is_v2_sadb_supported(fdev->ipsec) ?
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err) {
context = ERR_PTR(err);
goto delete_hash;
}
mutex_unlock(&fipsec->sa_hash_lock);
++fpga_xfrm->num_rules;
fpga_xfrm->sa_ctx = sa_ctx;
sa_ctx->fpga_xfrm = fpga_xfrm;
mutex_unlock(&fpga_xfrm->lock);
return sa_ctx;
delete_hash:
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
ida_free(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
return context;
}
static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
struct fs_fte *fte,
bool is_egress)
{
struct mlx5_accel_esp_xfrm *accel_xfrm;
__be32 saddr[4], daddr[4], spi;
struct mlx5_flow_group *fg;
bool is_ipv6 = false;
fs_get_obj(fg, fte->node.parent);
/* validate */
if (is_egress &&
!mlx5_is_fpga_egress_ipsec_rule(mdev,
fg->mask.match_criteria_enable,
fg->mask.match_criteria,
fte->val,
&fte->action,
&fte->flow_context))
return ERR_PTR(-EINVAL);
else if (!mlx5_is_fpga_ipsec_rule(mdev,
fg->mask.match_criteria_enable,
fg->mask.match_criteria,
fte->val))
return ERR_PTR(-EINVAL);
/* get xfrm context */
accel_xfrm =
(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
/* IPs */
if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
fte->val)) {
memcpy(&saddr[3],
MLX5_ADDR_OF(fte_match_set_lyr_2_4,
fte->val,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
sizeof(saddr[3]));
memcpy(&daddr[3],
MLX5_ADDR_OF(fte_match_set_lyr_2_4,
fte->val,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
sizeof(daddr[3]));
} else {
memcpy(saddr,
MLX5_ADDR_OF(fte_match_param,
fte->val,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
sizeof(saddr));
memcpy(daddr,
MLX5_ADDR_OF(fte_match_param,
fte->val,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
sizeof(daddr));
is_ipv6 = true;
}
/* SPI */
spi = MLX5_GET_BE(typeof(spi),
fte_match_param, fte->val,
misc_parameters.outer_esp_spi);
/* create */
return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
saddr, daddr,
spi, is_ipv6, NULL);
}
static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
int opcode = is_v2_sadb_supported(fdev->ipsec) ?
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
int err;
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err) {
WARN_ON(err);
return;
}
if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
MLX5_ACCEL_ESP_ACTION_DECRYPT)
ida_free(&fipsec->halloc, sa_ctx->sa_handle);
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
mutex_unlock(&fipsec->sa_hash_lock);
}
static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
mutex_lock(&fpga_xfrm->lock);
if (!--fpga_xfrm->num_rules) {
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
kfree(fpga_xfrm->sa_ctx);
fpga_xfrm->sa_ctx = NULL;
}
mutex_unlock(&fpga_xfrm->lock);
}
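/* Offloaded rules are kept in an rb-tree ordered by fs_fte pointer so
 * that a rule's SA context can be found again when the FTE is updated
 * or deleted.
 */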
static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
struct rb_node *node = root->rb_node;
while (node) {
struct mlx5_fpga_ipsec_rule *rule =
container_of(node, struct mlx5_fpga_ipsec_rule,
node);
if (rule->fte < fte)
node = node->rb_left;
else if (rule->fte > fte)
node = node->rb_right;
else
return rule;
}
return NULL;
}
static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
struct mlx5_fpga_ipsec_rule *rule;
mutex_lock(&ipsec_dev->rules_rb_lock);
rule = _rule_search(&ipsec_dev->rules_rb, fte);
mutex_unlock(&ipsec_dev->rules_rb_lock);
return rule;
}
static inline int _rule_insert(struct rb_root *root,
struct mlx5_fpga_ipsec_rule *rule)
{
struct rb_node **new = &root->rb_node, *parent = NULL;
/* Figure out where to put new node */
while (*new) {
struct mlx5_fpga_ipsec_rule *this =
container_of(*new, struct mlx5_fpga_ipsec_rule,
node);
parent = *new;
if (rule->fte < this->fte)
new = &((*new)->rb_left);
else if (rule->fte > this->fte)
new = &((*new)->rb_right);
else
return -EEXIST;
}
/* Add new node and rebalance tree. */
rb_link_node(&rule->node, parent, new);
rb_insert_color(&rule->node, root);
return 0;
}
static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
int ret;
mutex_lock(&ipsec_dev->rules_rb_lock);
ret = _rule_insert(&ipsec_dev->rules_rb, rule);
mutex_unlock(&ipsec_dev->rules_rb_lock);
return ret;
}
static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
struct rb_root *root = &ipsec_dev->rules_rb;
mutex_lock(&ipsec_dev->rules_rb_lock);
rb_erase(&rule->node, root);
mutex_unlock(&ipsec_dev->rules_rb_lock);
}
static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
_rule_delete(ipsec_dev, rule);
kfree(rule);
}
struct mailbox_mod {
uintptr_t saved_esp_id;
u32 saved_action;
u32 saved_outer_esp_spi_value;
};
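/* The crypto part of an ESP rule is handled by the FPGA rather than by
 * firmware, so before an FTE is sent to firmware the esp_id, the
 * ENCRYPT/DECRYPT actions and (if the device cannot match on
 * outer_esp_spi) the SPI match value are stripped from the mailbox and
 * saved in a mailbox_mod, to be restored afterwards.
 */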
static void restore_spec_mailbox(struct fs_fte *fte,
struct mailbox_mod *mbox_mod)
{
char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
fte->val,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
mbox_mod->saved_outer_esp_spi_value);
fte->action.action |= mbox_mod->saved_action;
fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
struct fs_fte *fte,
struct mailbox_mod *mbox_mod)
{
char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
fte->val,
misc_parameters);
mbox_mod->saved_esp_id = fte->action.esp_id;
mbox_mod->saved_action = fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
mbox_mod->saved_outer_esp_spi_value =
MLX5_GET(fte_match_set_misc, misc_params_v,
outer_esp_spi);
fte->action.esp_id = 0;
fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
if (!MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}
static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}
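/* Wrap the default create_flow_group command: if firmware cannot match
 * on outer_esp_spi, clear the SPI mask before the call (dropping
 * misc_parameters entirely when SPI was its only field) and restore
 * the original match criteria afterwards.
 */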
static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg,
bool is_egress)
{
int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 *in,
struct mlx5_flow_group *fg) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters);
struct mlx5_core_dev *dev = ns->dev;
u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable;
int ret;
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
return create_flow_group(ns, ft, in, fg);
match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask)
return create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
if (!(*misc_params_c) &&
!memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
ret = create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
return ret;
}
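/* Wrap the default create_fte command: for ESP rules carrying an
 * ENCRYPT/DECRYPT action, first program the SA into the FPGA and
 * record the rule, then create the FTE with the crypto fields masked
 * out of the mailbox.
 */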
static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte,
bool is_egress)
{
int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return create_fte(ns, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
if (IS_ERR(rule->ctx)) {
int err = PTR_ERR(rule->ctx);
kfree(rule);
return err;
}
rule->fte = fte;
WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = create_fte(ns, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod);
if (ret) {
_rule_delete(fipsec, rule);
mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
kfree(rule);
}
return ret;
}
static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte,
bool is_egress)
{
int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
struct mlx5_core_dev *dev = ns->dev;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return update_fte(ns, ft, fg, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = update_fte(ns, ft, fg, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte,
bool is_egress)
{
int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return delete_fte(ns, ft, fte);
rule = rule_search(fipsec, fte);
if (!rule)
return -ENOENT;
mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = delete_fte(ns, ft, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg)
{
return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}
static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}
static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
true);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg)
{
return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}
static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}
static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
false);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
switch (type) {
case FS_FT_NIC_RX:
return &fpga_ipsec_ingress;
case FS_FT_NIC_TX:
return &fpga_ipsec_egress;
default:
WARN_ON(true);
return NULL;
}
}
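/* Device init: read the SBU extended capabilities, create the command
 * QP connection to the FPGA and set up the SA hash, the rules rb-tree
 * and the RX SA handle allocator.
 */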
static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_conn_attr init_attr = {0};
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_conn *conn;
int err;
if (!mlx5_fpga_is_ipsec_device(mdev))
return 0;
fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
if (!fdev->ipsec)
return -ENOMEM;
fdev->ipsec->fdev = fdev;
err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
fdev->ipsec->caps);
if (err) {
mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
err);
goto error;
}
INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
spin_lock_init(&fdev->ipsec->pending_cmds_lock);
init_attr.rx_size = SBU_QP_QUEUE_SIZE;
init_attr.tx_size = SBU_QP_QUEUE_SIZE;
init_attr.recv_cb = mlx5_fpga_ipsec_recv;
init_attr.cb_arg = fdev;
conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
err);
goto error;
}
fdev->ipsec->conn = conn;
err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
if (err)
goto err_destroy_conn;
mutex_init(&fdev->ipsec->sa_hash_lock);
fdev->ipsec->rules_rb = RB_ROOT;
mutex_init(&fdev->ipsec->rules_rb_lock);
err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
if (err) {
mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
err);
goto err_destroy_hash;
}
ida_init(&fdev->ipsec->halloc);
return 0;
err_destroy_hash:
rhashtable_destroy(&fdev->ipsec->sa_hash);
err_destroy_conn:
mlx5_fpga_sbu_conn_destroy(conn);
error:
kfree(fdev->ipsec);
fdev->ipsec = NULL;
return err;
}
static void destroy_rules_rb(struct rb_root *root)
{
struct mlx5_fpga_ipsec_rule *r, *tmp;
rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
rb_erase(&r->node, root);
mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
kfree(r);
}
}
static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
ida_destroy(&fdev->ipsec->halloc);
destroy_rules_rb(&fdev->ipsec->rules_rb);
rhashtable_destroy(&fdev->ipsec->sa_hash);
mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
kfree(fdev->ipsec);
fdev->ipsec = NULL;
}
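/* Build the ingress/egress flow-steering command tables once at module
 * init: flow group and FTE operations are overridden by the
 * IPsec-aware wrappers above, everything else is inherited from the
 * default NIC RX/TX commands.
 */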
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
/* ingress */
fpga_ipsec_ingress.create_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
fpga_ipsec_ingress.destroy_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
fpga_ipsec_ingress.modify_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
fpga_ipsec_ingress.create_flow_group =
mlx5_fpga_ipsec_fs_create_flow_group_ingress;
fpga_ipsec_ingress.destroy_flow_group =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
fpga_ipsec_ingress.create_fte =
mlx5_fpga_ipsec_fs_create_fte_ingress;
fpga_ipsec_ingress.update_fte =
mlx5_fpga_ipsec_fs_update_fte_ingress;
fpga_ipsec_ingress.delete_fte =
mlx5_fpga_ipsec_fs_delete_fte_ingress;
fpga_ipsec_ingress.update_root_ft =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
/* egress */
fpga_ipsec_egress.create_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
fpga_ipsec_egress.destroy_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
fpga_ipsec_egress.modify_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
fpga_ipsec_egress.create_flow_group =
mlx5_fpga_ipsec_fs_create_flow_group_egress;
fpga_ipsec_egress.destroy_flow_group =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
fpga_ipsec_egress.create_fte =
mlx5_fpga_ipsec_fs_create_fte_egress;
fpga_ipsec_egress.update_fte =
mlx5_fpga_ipsec_fs_update_fte_egress;
fpga_ipsec_egress.delete_fte =
mlx5_fpga_ipsec_fs_delete_fte_egress;
fpga_ipsec_egress.update_root_ft =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}
static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
if (attrs->tfc_pad) {
mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
return -EOPNOTSUPP;
}
if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
return -EOPNOTSUPP;
}
if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.iv_algo !=
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.icv_len != 128) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.key_len != 128 &&
attrs->keymat.aes_gcm.key_len != 256) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EOPNOTSUPP;
}
if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
(!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
v2_command))) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EOPNOTSUPP;
}
return 0;
}
static struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
return ERR_PTR(-EINVAL);
}
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return ERR_PTR(-EOPNOTSUPP);
}
fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
if (!fpga_xfrm)
return ERR_PTR(-ENOMEM);
mutex_init(&fpga_xfrm->lock);
memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
sizeof(fpga_xfrm->accel_xfrm.attrs));
return &fpga_xfrm->accel_xfrm;
}
static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(xfrm, struct mlx5_fpga_esp_xfrm,
accel_xfrm);
	/* assuming no sa_ctx is connected to this xfrm_ctx */
kfree(fpga_xfrm);
}
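/* Re-key a bound xfrm in place: remove the old hw_sa from the hash,
 * rebuild it from the new attrs, re-insert it and push it to the
 * device, rolling back to the original hw_sa on failure.
 */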
static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = xfrm->mdev;
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
int err = 0;
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return -EOPNOTSUPP;
}
if (is_v2_sadb_supported(fipsec)) {
mlx5_core_warn(mdev, "Modify esp is not supported\n");
return -EOPNOTSUPP;
}
fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
mutex_lock(&fpga_xfrm->lock);
if (!fpga_xfrm->sa_ctx)
		/* Unbound xfrm, change only sw attrs */
goto change_sw_xfrm_attrs;
/* copy original hw sa */
memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
mutex_lock(&fipsec->sa_hash_lock);
/* remove original hw sa from hash */
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash, rhash_sa));
/* update hw_sa with new xfrm attrs*/
mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
&fpga_xfrm->sa_ctx->hw_sa);
/* try to insert new hw_sa to hash */
err = rhashtable_insert_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash, rhash_sa);
if (err)
goto rollback_sa;
/* modify device with new hw_sa */
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err)
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash,
rhash_sa));
rollback_sa:
if (err) {
/* return original hw_sa to hash */
memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
sizeof(org_hw_sa));
WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash,
rhash_sa));
}
mutex_unlock(&fipsec->sa_hash_lock);
change_sw_xfrm_attrs:
if (!err)
memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
mutex_unlock(&fpga_xfrm->lock);
return err;
}
static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
.device_caps = mlx5_fpga_ipsec_device_caps,
.counters_count = mlx5_fpga_ipsec_counters_count,
.counters_read = mlx5_fpga_ipsec_counters_read,
.create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
.free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
.init = mlx5_fpga_ipsec_init,
.cleanup = mlx5_fpga_ipsec_cleanup,
.esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
.esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
.esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{
if (!mlx5_fpga_is_ipsec_device(mdev))
return NULL;
return &fpga_ipsec_ops;
}
/*
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_FPGA_IPSEC_H__
#define __MLX5_FPGA_IPSEC_H__
#include "accel/ipsec.h"
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
#else
static inline
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{ return NULL; }
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */
@@ -40,8 +40,6 @@
 #include "fs_cmd.h"
 #include "fs_ft_pool.h"
 #include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
 
 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
 					 sizeof(struct init_tree_node))
@@ -2519,10 +2517,6 @@ static struct mlx5_flow_root_namespace
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_namespace *ns;
 
-	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
-	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
-		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
-
 	/* Create the root namespace */
 	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
 	if (!root_ns)
@@ -3172,8 +3166,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 			goto err;
 	}
 
-	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
-	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
 		err = init_egress_root_ns(steering);
 		if (err)
 			goto err;
......
@@ -62,7 +62,6 @@
 #include "lib/mlx5.h"
 #include "lib/tout.h"
 #include "fpga/core.h"
-#include "fpga/ipsec.h"
 #include "accel/ipsec.h"
 #include "lib/clock.h"
 #include "lib/vxlan.h"
@@ -1937,7 +1936,6 @@ static int __init init(void)
 	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
 
 	mlx5_core_verify_params();
-	mlx5_fpga_ipsec_build_fs_cmds();
 	mlx5_register_debugfs();
 
 	err = pci_register_driver(&mlx5_core_driver);
......
@@ -386,68 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
 	u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_ipsec_extended_cap_bits {
-	u8         encapsulation[0x20];
-
-	u8         reserved_0[0x12];
-	u8         v2_command[0x1];
-	u8         udp_encap[0x1];
-	u8         rx_no_trailer[0x1];
-	u8         ipv4_fragment[0x1];
-	u8         ipv6[0x1];
-	u8         esn[0x1];
-	u8         lso[0x1];
-	u8         transport_and_tunnel_mode[0x1];
-	u8         tunnel_mode[0x1];
-	u8         transport_mode[0x1];
-	u8         ah_esp[0x1];
-	u8         esp[0x1];
-	u8         ah[0x1];
-	u8         ipv4_options[0x1];
-
-	u8         auth_alg[0x20];
-
-	u8         enc_alg[0x20];
-
-	u8         sa_cap[0x20];
-
-	u8         reserved_1[0x10];
-	u8         number_of_ipsec_counters[0x10];
-
-	u8         ipsec_counters_addr_low[0x20];
-	u8         ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
-	u8         dec_in_packets[0x40];
-	u8         dec_out_packets[0x40];
-	u8         dec_bypass_packets[0x40];
-	u8         enc_in_packets[0x40];
-	u8         enc_out_packets[0x40];
-	u8         enc_bypass_packets[0x40];
-	u8         drop_dec_packets[0x40];
-	u8         failed_auth_dec_packets[0x40];
-	u8         drop_enc_packets[0x40];
-	u8         success_add_sa[0x40];
-	u8         fail_add_sa[0x40];
-	u8         success_delete_sa[0x40];
-	u8         fail_delete_sa[0x40];
-	u8         dropped_cmd[0x40];
-};
-
 enum {
 	MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
 	MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -464,90 +402,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
 	u8         reserved_at_c0[0x8];
 	u8         fpga_qpn[0x18];
 };
-
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
-	MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
-	MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
-	MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
-	MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
-	__be32 syndrome;
-	union {
-		__be32 sw_sa_handle;
-		__be32 flags;
-	};
-	u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
-	MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
-	MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
-	MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
-	MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
-	MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
-	MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
-	MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
-	__be32 cmd;
-	__be32 flags;
-	u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
-	MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
-	MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
-	MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
-	MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
-	MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
-	MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
-	MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
-	MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
-	MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
-	MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
-	MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
-	__be32 cmd;
-	u8 key_enc[32];
-	u8 key_auth[32];
-	__be32 sip[4];
-	__be32 dip[4];
-	union {
-		struct {
-			__be32 reserved;
-			u8 salt_iv[8];
-			__be32 salt;
-		} __packed gcm;
-		struct {
-			u8 salt[16];
-		} __packed cbc;
-	};
-	__be32 spi;
-	__be32 sw_sa_handle;
-	__be16 tfclen;
-	u8 enc_mode;
-	u8 reserved1[2];
-	u8 flags;
-	u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
-	struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
-	__be16 udp_sp;
-	__be16 udp_dp;
-	u8 reserved1[4];
-	__be32 esn;
-	__be16 vid;	/* only 12 bits, rest is reserved */
-	__be16 reserved2;
-} __packed;
-
 #endif /* MLX5_IFC_FPGA_H */