Commit 1182f365 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: kTLS, Add kTLS RX HW offload support

Implement driver support for the kTLS RX HW offload feature.
Resync support is added in a downstream patch.

New offload contexts post their static/progress params WQEs
over the per-channel async ICOSQ, protected under a spin-lock.
The Channel/RQ is selected according to the socket's rxq index.

Feature is OFF by default. Can be turned on by:
$ ethtool -K <if> tls-hw-rx-offload on

A new TLS-RX workqueue is used to allow asynchronous addition of
steering rules, out of the NAPI context.
It will be also used in a downstream patch in the resync procedure.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent df8d8667
...@@ -173,6 +173,7 @@ config MLX5_TLS ...@@ -173,6 +173,7 @@ config MLX5_TLS
config MLX5_EN_TLS config MLX5_EN_TLS
bool "TLS cryptography-offload accelaration" bool "TLS cryptography-offload accelaration"
depends on MLX5_CORE_EN depends on MLX5_CORE_EN
depends on XPS
depends on MLX5_FPGA_TLS || MLX5_TLS depends on MLX5_FPGA_TLS || MLX5_TLS
default y default y
help help
......
...@@ -75,7 +75,7 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ ...@@ -75,7 +75,7 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_tx.o en_accel/ktls_rx.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \ steering/dr_matcher.o steering/dr_rule.o \
......
...@@ -43,9 +43,20 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, ...@@ -43,9 +43,20 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
u32 *p_key_id); u32 *p_key_id);
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
/* True if the device reports the TX kTLS HW capability bit. */
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, tls_tx);
}

/* True if the device reports the RX kTLS HW capability bit. */
static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, tls_rx);
}
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{ {
if (!MLX5_CAP_GEN(mdev, tls_tx)) if (!mlx5_accel_is_ktls_tx(mdev) &&
!mlx5_accel_is_ktls_rx(mdev))
return false; return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek)) if (!MLX5_CAP_GEN(mdev, log_max_dek))
...@@ -67,6 +78,12 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, ...@@ -67,6 +78,12 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
return false; return false;
} }
#else #else
/* Stubs for builds without kTLS support: capability checks always fail. */
static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
{ return false; }

static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
{ return false; }
static inline int static inline int
mlx5_ktls_create_key(struct mlx5_core_dev *mdev, mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
......
...@@ -11,6 +11,10 @@ ...@@ -11,6 +11,10 @@
enum mlx5e_icosq_wqe_type { enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX, MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ICOSQ_WQE_UMR_TLS,
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
#endif
}; };
static inline bool static inline bool
...@@ -114,9 +118,16 @@ struct mlx5e_icosq_wqe_info { ...@@ -114,9 +118,16 @@ struct mlx5e_icosq_wqe_info {
struct { struct {
struct mlx5e_rq *rq; struct mlx5e_rq *rq;
} umr; } umr;
#ifdef CONFIG_MLX5_EN_TLS
struct {
struct mlx5e_ktls_offload_context_rx *priv_rx;
} tls_set_params;
#endif
}; };
}; };
void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size) static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{ {
struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5_wq_cyc *wq = &sq->wq;
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h" #include "en_accel/tls_rxtx.h"
#include "en.h" #include "en.h"
#include "en/txrx.h" #include "en/txrx.h"
...@@ -147,4 +148,23 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv, ...@@ -147,4 +148,23 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
return true; return true;
} }
/* Map a socket to its recorded RX queue index; fall back to queue 0
 * when the socket has no recorded RX queue (sk_rx_queue_get() == -1).
 */
static inline int mlx5e_accel_sk_get_rxq(struct sock *sk)
{
	int queue_idx = sk_rx_queue_get(sk);

	return unlikely(queue_idx == -1) ? 0 : queue_idx;
}
/* RX acceleration init hook; currently delegates to kTLS RX setup. */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}

/* RX acceleration teardown hook; mirrors mlx5e_accel_init_rx(). */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */ #endif /* __MLX5E_EN_ACCEL_H__ */
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include "en.h" #include "en.h"
#include "en_accel/ktls.h" #include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h" #include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction, enum tls_offload_ctx_dir direction,
...@@ -14,13 +15,13 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, ...@@ -14,13 +15,13 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
int err; int err;
if (WARN_ON(direction != TLS_OFFLOAD_CTX_DIR_TX))
return -EINVAL;
if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info))) if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn); if (direction == TLS_OFFLOAD_CTX_DIR_TX)
err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
else
err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);
return err; return err;
} }
...@@ -29,26 +30,71 @@ static void mlx5e_ktls_del(struct net_device *netdev, ...@@ -29,26 +30,71 @@ static void mlx5e_ktls_del(struct net_device *netdev,
struct tls_context *tls_ctx, struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction) enum tls_offload_ctx_dir direction)
{ {
if (direction != TLS_OFFLOAD_CTX_DIR_TX) if (direction == TLS_OFFLOAD_CTX_DIR_TX)
return; mlx5e_ktls_del_tx(netdev, tls_ctx);
else
mlx5e_ktls_del_rx(netdev, tls_ctx);
}
mlx5e_ktls_del_tx(netdev, tls_ctx); static int mlx5e_ktls_resync(struct net_device *netdev,
struct sock *sk, u32 seq, u8 *rcd_sn,
enum tls_offload_ctx_dir direction)
{
return -EOPNOTSUPP;
} }
static const struct tlsdev_ops mlx5e_ktls_ops = { static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_add = mlx5e_ktls_add, .tls_dev_add = mlx5e_ktls_add,
.tls_dev_del = mlx5e_ktls_del, .tls_dev_del = mlx5e_ktls_del,
.tls_dev_resync = mlx5e_ktls_resync,
}; };
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{ {
struct net_device *netdev = priv->netdev; struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
if (!mlx5_accel_is_ktls_device(priv->mdev)) if (!mlx5_accel_is_ktls_device(mdev))
return; return;
netdev->hw_features |= NETIF_F_HW_TLS_TX; if (mlx5_accel_is_ktls_tx(mdev)) {
netdev->features |= NETIF_F_HW_TLS_TX; netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
if (mlx5_accel_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops; netdev->tlsdev_ops = &mlx5e_ktls_ops;
} }
/* ethtool feature toggle for tls-hw-rx-offload: create or destroy the
 * accel TCP flow-steering tables under the state lock.
 */
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	if (enable) {
		err = mlx5e_accel_fs_tcp_create(priv);
	} else {
		mlx5e_accel_fs_tcp_destroy(priv);
		err = 0;
	}
	mutex_unlock(&priv->state_lock);

	return err;
}
/* Create the accel TCP steering tables iff the RX TLS feature is on. */
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
	if (!(priv->netdev->features & NETIF_F_HW_TLS_RX))
		return 0;

	return mlx5e_accel_fs_tcp_create(priv);
}
/* Tear down the accel TCP steering tables iff the RX TLS feature is on. */
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
	if (!(priv->netdev->features & NETIF_F_HW_TLS_RX))
		return;

	mlx5e_accel_fs_tcp_destroy(priv);
}
...@@ -9,13 +9,30 @@ ...@@ -9,13 +9,30 @@
#ifdef CONFIG_MLX5_EN_TLS #ifdef CONFIG_MLX5_EN_TLS
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
#else #else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{ {
} }
/* Stubs for builds without CONFIG_MLX5_EN_TLS. */
static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
	return 0;
}

static inline void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
}

/* Reject the ethtool RX TLS feature toggle when kTLS is compiled out. */
static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	netdev_warn(netdev, "kTLS is not supported\n");
	return -EOPNOTSUPP;
}
#endif #endif
#endif /* __MLX5E_TLS_H__ */ #endif /* __MLX5E_TLS_H__ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
#include "en_accel/en_accel.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
/* Deferred flow-steering rule installation for one RX kTLS connection.
 * The rule is added from a workqueue (out of NAPI context), since the
 * steering work is queued from the ICOSQ completion path.
 */
struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

enum {
	/* Set in the del flow; makes the rule work / completion bail out. */
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

/* Per-connection RX kTLS offload state, stored in the TLS driver ctx. */
struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct completion add_ctx; /* signaled when the add flow finishes */
	u32 tirn;                  /* TLS-enabled TIR of this connection */
	u32 key_id;                /* DEK index of the imported key */
	u32 rxq;                   /* RX queue index taken from the socket */
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
};
/* Create a TLS-enabled indirect TIR that steers this connection's
 * traffic through the RQT @rqtn. On success, *tirn holds the new TIR
 * number. Returns 0 or a negative errno.
 */
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
	void *tir_ctx;
	u32 *cmd_in;
	int cmd_len;
	int ret;

	cmd_len = MLX5_ST_SZ_BYTES(create_tir_in);
	cmd_in = kvzalloc(cmd_len, GFP_KERNEL);
	if (!cmd_in)
		return -ENOMEM;

	tir_ctx = MLX5_ADDR_OF(create_tir_in, cmd_in, ctx);

	MLX5_SET(tirc, tir_ctx, transport_domain, mdev->mlx5e_res.td.tdn);
	MLX5_SET(tirc, tir_ctx, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
	MLX5_SET(tirc, tir_ctx, indirect_table, rqtn);
	MLX5_SET(tirc, tir_ctx, tls_en, 1);
	MLX5_SET(tirc, tir_ctx, self_lb_block,
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);

	ret = mlx5_core_create_tir(mdev, cmd_in, tirn);

	kvfree(cmd_in);
	return ret;
}
/* Workqueue handler: install the flow-steering rule for the connection's
 * socket, unless teardown already started, then signal the add flow's
 * completion so mlx5e_ktls_del_rx() can proceed.
 */
static void accel_rule_handle_work(struct work_struct *work)
{
	struct accel_rule *ar = container_of(work, struct accel_rule, work);
	struct mlx5e_ktls_offload_context_rx *priv_rx =
		container_of(ar, struct mlx5e_ktls_offload_context_rx, rule);

	if (likely(!test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		struct mlx5_flow_handle *fh;

		fh = mlx5e_accel_fs_add_sk(ar->priv, priv_rx->sk,
					   priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
		if (!IS_ERR_OR_NULL(fh))
			ar->rule = fh;
	}

	complete(&priv_rx->add_ctx);
}
/* Prepare the deferred steering-rule state for a new connection.
 * NOTE(review): @sk is currently unused — presumably reserved for the
 * downstream resync patch mentioned in the commit message; confirm.
 */
static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
			    struct sock *sk)
{
	rule->priv = priv;
	INIT_WORK(&rule->work, accel_rule_handle_work);
}
/* Record the WQE metadata at producer index @pi, consumed later by the
 * ICOSQ completion / cleanup handlers.
 */
static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}
/* Post a UMR WQE carrying the connection's static TLS params (key, TIR)
 * on the async ICOSQ. Caller must hold the channel's async ICOSQ lock.
 * Returns the WQE's ctrl segment, or ERR_PTR(-ENOSPC) when the SQ has
 * no room (including the reserved stop room).
 */
static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	/* Check room before claiming a producer slot. */
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       priv_rx->tirn, priv_rx->key_id, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	/* Stash priv_rx on the wqe info for the completion handlers. */
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}
/* Post a SET_PSV WQE carrying the connection's progress params (next
 * expected TCP sequence number) on the async ICOSQ. Caller must hold
 * the channel's async ICOSQ lock. Returns the WQE's ctrl segment, or
 * ERR_PTR(-ENOSPC) when the SQ has no room.
 */
static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	/* Check room before claiming a producer slot. */
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
					 next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	/* The SET_PSV completion triggers the steering-rule work. */
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}
/* Post both the static- and progress-params WQEs on the channel's async
 * ICOSQ, under its spin lock, and ring the doorbell once. On failure,
 * signals add_ctx so the del flow does not block forever.
 */
static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5e_icosq *sq = &c->async_icosq;
	struct mlx5_wqe_ctrl_seg *cseg;
	int err = 0;

	spin_lock(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (!IS_ERR(cseg))
		cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);

	if (IS_ERR(cseg)) {
		err = PTR_ERR(cseg);
		complete(&priv_rx->add_ctx);
	} else {
		/* Doorbell: notify HW of the last posted WQE. */
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	spin_unlock(&c->async_icosq_lock);

	return err;
}
/* Stash the driver's RX offload context pointer in the TLS core's
 * per-connection driver area.
 */
static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **slot;

	/* The pointer must fit in the space the TLS core reserves for drivers. */
	BUILD_BUG_ON(sizeof(priv_rx) > TLS_OFFLOAD_CONTEXT_SIZE_RX);

	slot = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
	*slot = priv_rx;
}
/* Fetch the driver's RX offload context previously stored by
 * mlx5e_set_ktls_rx_priv_ctx().
 */
static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **slot =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *slot;
}
/* Inspect the CQE's TLS offload status and mark the skb decrypted when
 * HW decrypted it. RESYNC and error indications are accepted silently
 * here (resync handling arrives in a downstream patch).
 */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	u8 state = get_cqe_tls_offload(cqe);

	/* Fast path: CQE carries no TLS offload indication. */
	if (likely(state == CQE_TLS_OFFLOAD_NOT_DECRYPTED))
		return;

	if (state == CQE_TLS_OFFLOAD_DECRYPTED)
		skb->decrypted = 1;
	/* CQE_TLS_OFFLOAD_RESYNC / CQE_TLS_OFFLOAD_ERROR: no action yet. */
}
/* SET_PSV WQE completion: hand off steering-rule installation to the
 * TLS-RX workqueue, or — if the context is being deleted — just signal
 * the add-flow completion.
 */
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}

	queue_work(priv_rx->rule.priv->tls->rx_wq, &priv_rx->rule.work);
}
/* tls_dev_add() RX path: import the crypto key as a DEK, create a
 * TLS-enabled TIR bound to the socket's RX queue, and post the
 * static/progress params WQEs on that channel's async ICOSQ. The
 * steering rule itself is installed later, from the ICOSQ completion
 * via the TLS-RX workqueue. Returns 0 or a negative errno.
 */
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;
	u32 rqtn;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	/* Type was validated by mlx5e_ktls_type_check() in the caller. */
	priv_rx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_rx->sk = sk;
	priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk);

	/* Must be set before posting WQEs: completions reference priv_rx. */
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	rxq = priv_rx->rxq;
	/* NOTE(review): rxq comes from the socket and indexes direct_tir /
	 * channels.c without a visible bounds check against the current
	 * channel count — confirm it cannot exceed it here.
	 */
	rqtn = priv->direct_tir[rxq].rqt.rqtn;

	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);
	accel_rule_init(&priv_rx->rule, priv, sk);
	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	return 0;

err_post_wqes:
	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}
/* tls_dev_del() RX path: mark the context as deleting, synchronize with
 * any in-flight add flow (pending rule work or ICOSQ completion), then
 * tear down the steering rule, the TIR, and the key, and free the ctx.
 */
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	/* Makes the rule work and ctx completion bail out early. */
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);

	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	kfree(priv_rx);
}
...@@ -188,7 +188,7 @@ post_progress_params(struct mlx5e_txqsq *sq, ...@@ -188,7 +188,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
TLS_OFFLOAD_CTX_DIR_TX); TLS_OFFLOAD_CTX_DIR_TX);
tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
sq->pc += num_wqebbs; sq->pc += num_wqebbs;
......
...@@ -59,11 +59,13 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ...@@ -59,11 +59,13 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
{ {
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ?
MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS :
MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS;
#define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | (opmod << 24));
(MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT); STATIC_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
...@@ -76,12 +78,15 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ...@@ -76,12 +78,15 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
} }
static void static void
fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num) fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num,
u32 next_record_tcp_sn)
{ {
u8 *ctx = params->ctx; u8 *ctx = params->ctx;
params->tis_tir_num = cpu_to_be32(tis_tir_num); params->tis_tir_num = cpu_to_be32(tis_tir_num);
MLX5_SET(tls_progress_params, ctx, next_record_tcp_sn,
next_record_tcp_sn);
MLX5_SET(tls_progress_params, ctx, record_tracker_state, MLX5_SET(tls_progress_params, ctx, record_tracker_state,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
MLX5_SET(tls_progress_params, ctx, auth_state, MLX5_SET(tls_progress_params, ctx, auth_state,
...@@ -92,19 +97,22 @@ void ...@@ -92,19 +97,22 @@ void
mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,
u16 pc, u32 sqn, u16 pc, u32 sqn,
u32 tis_tir_num, bool fence, u32 tis_tir_num, bool fence,
u32 next_record_tcp_sn,
enum tls_offload_ctx_dir direction) enum tls_offload_ctx_dir direction)
{ {
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ?
MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS :
MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS;
#define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
cseg->opmod_idx_opcode = cseg->opmod_idx_opcode =
cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | (opmod << 24));
(MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
PROGRESS_PARAMS_DS_CNT); PROGRESS_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fill_progress_params(&wqe->params, tis_tir_num); fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);
} }
...@@ -19,6 +19,10 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); ...@@ -19,6 +19,10 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen, struct sk_buff *skb, int datalen,
struct mlx5e_accel_tx_tls_state *state); struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi, struct mlx5e_tx_wqe_info *wi,
......
...@@ -23,6 +23,9 @@ enum { ...@@ -23,6 +23,9 @@ enum {
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn); struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn);
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx); void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx);
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn);
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
struct mlx5e_set_tls_static_params_wqe { struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_ctrl_seg ctrl;
...@@ -64,6 +67,7 @@ void ...@@ -64,6 +67,7 @@ void
mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,
u16 pc, u32 sqn, u16 pc, u32 sqn,
u32 tis_tir_num, bool fence, u32 tis_tir_num, bool fence,
u32 next_record_tcp_sn,
enum tls_offload_ctx_dir direction); enum tls_offload_ctx_dir direction);
#endif /* __MLX5E_TLS_UTILS_H__ */ #endif /* __MLX5E_TLS_UTILS_H__ */
...@@ -197,6 +197,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) ...@@ -197,6 +197,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
return; return;
} }
/* FPGA */
if (!mlx5_accel_is_tls_device(priv->mdev)) if (!mlx5_accel_is_tls_device(priv->mdev))
return; return;
...@@ -221,11 +222,19 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) ...@@ -221,11 +222,19 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
int mlx5e_tls_init(struct mlx5e_priv *priv) int mlx5e_tls_init(struct mlx5e_priv *priv)
{ {
struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL); struct mlx5e_tls *tls;
if (!mlx5_accel_is_tls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls) if (!tls)
return -ENOMEM; return -ENOMEM;
tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
if (!tls->rx_wq)
return -ENOMEM;
priv->tls = tls; priv->tls = tls;
return 0; return 0;
} }
...@@ -237,6 +246,7 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv) ...@@ -237,6 +246,7 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
if (!tls) if (!tls)
return; return;
destroy_workqueue(tls->rx_wq);
kfree(tls); kfree(tls);
priv->tls = NULL; priv->tls = NULL;
} }
...@@ -53,6 +53,7 @@ struct mlx5e_tls_sw_stats { ...@@ -53,6 +53,7 @@ struct mlx5e_tls_sw_stats {
struct mlx5e_tls { struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats; struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
}; };
struct mlx5e_tls_offload_context_tx { struct mlx5e_tls_offload_context_tx {
......
...@@ -278,9 +278,10 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, ...@@ -278,9 +278,10 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out; goto err_out;
if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) if (mlx5_accel_is_ktls_tx(sq->channel->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
skb_seq = ntohl(tcp_hdr(skb)->seq); skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx); context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq; expected_seq = context->expected_seq;
...@@ -354,12 +355,16 @@ static int tls_update_resync_sn(struct net_device *netdev, ...@@ -354,12 +355,16 @@ static int tls_update_resync_sn(struct net_device *netdev,
return 0; return 0;
} }
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt) struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{ {
struct mlx5e_tls_metadata *mdata; struct mlx5e_tls_metadata *mdata;
struct mlx5e_priv *priv; struct mlx5e_priv *priv;
if (likely(mlx5_accel_is_ktls_rx(rq->mdev)))
return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
/* FPGA */
if (!is_metadata_hdr_valid(skb)) if (!is_metadata_hdr_valid(skb))
return; return;
...@@ -370,13 +375,13 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, ...@@ -370,13 +375,13 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
skb->decrypted = 1; skb->decrypted = 1;
break; break;
case SYNDROM_RESYNC_REQUEST: case SYNDROM_RESYNC_REQUEST:
tls_update_resync_sn(netdev, skb, mdata); tls_update_resync_sn(rq->netdev, skb, mdata);
priv = netdev_priv(netdev); priv = netdev_priv(rq->netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
break; break;
case SYNDROM_AUTH_FAILED: case SYNDROM_AUTH_FAILED:
/* Authentication failure will be observed and verified by kTLS */ /* Authentication failure will be observed and verified by kTLS */
priv = netdev_priv(netdev); priv = netdev_priv(rq->netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
break; break;
default: default:
...@@ -395,9 +400,10 @@ u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) ...@@ -395,9 +400,10 @@ u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
if (!mlx5_accel_is_tls_device(mdev)) if (!mlx5_accel_is_tls_device(mdev))
return 0; return 0;
if (MLX5_CAP_GEN(mdev, tls_tx)) if (mlx5_accel_is_ktls_device(mdev))
return mlx5e_ktls_get_stop_room(sq); return mlx5e_ktls_get_stop_room(sq);
/* FPGA */
/* Resync SKB. */ /* Resync SKB. */
return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
} }
...@@ -49,8 +49,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, ...@@ -49,8 +49,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state); struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 *cqe_bcnt); struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
#else #else
......
...@@ -1441,6 +1441,7 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq) ...@@ -1441,6 +1441,7 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
struct mlx5e_channel *c = sq->channel; struct mlx5e_channel *c = sq->channel;
mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq_descs(sq);
mlx5e_free_icosq(sq); mlx5e_free_icosq(sq);
} }
...@@ -3853,6 +3854,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) ...@@ -3853,6 +3854,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
#ifdef CONFIG_MLX5_EN_ARFS #ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif #endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
if (err) { if (err) {
netdev->features = oper_features; netdev->features = oper_features;
...@@ -5143,8 +5145,14 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) ...@@ -5143,8 +5145,14 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err) if (err)
goto err_destroy_flow_steering; goto err_destroy_flow_steering;
err = mlx5e_accel_init_rx(priv);
if (err)
goto err_tc_nic_cleanup;
return 0; return 0;
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering: err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv); mlx5e_destroy_flow_steering(priv);
err_destroy_xsk_tirs: err_destroy_xsk_tirs:
...@@ -5168,6 +5176,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) ...@@ -5168,6 +5176,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{ {
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv); mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv); mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
......
...@@ -578,6 +578,30 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) ...@@ -578,6 +578,30 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err; return !!err;
} }
/* Drain metadata of all WQEs still outstanding on a closing ICOSQ.
 * For TLS SET_PSV WQEs this runs the ctx-completion handler so the
 * offload add flow is not left waiting on its completion.
 */
void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
{
	u16 cc = sq->cc;

	while (cc != sq->pc) {
		struct mlx5e_icosq_wqe_info *wi =
			&sq->db.wqe_info[mlx5_wq_cyc_ctr2ix(&sq->wq, cc)];

		cc += wi->num_wqebbs;
#ifdef CONFIG_MLX5_EN_TLS
		if (wi->wqe_type == MLX5E_ICOSQ_WQE_SET_PSV_TLS)
			mlx5e_ktls_handle_ctx_completion(wi);
#endif
	}

	sq->cc = cc;
}
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{ {
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
...@@ -633,6 +657,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ...@@ -633,6 +657,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break; break;
case MLX5E_ICOSQ_WQE_NOP: case MLX5E_ICOSQ_WQE_NOP:
break; break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
mlx5e_ktls_handle_ctx_completion(wi);
break;
#endif
default: default:
netdev_WARN_ONCE(cq->channel->netdev, netdev_WARN_ONCE(cq->channel->netdev,
"Bad WQE type in ICOSQ WQE info: 0x%x\n", "Bad WQE type in ICOSQ WQE info: 0x%x\n",
...@@ -983,7 +1014,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, ...@@ -983,7 +1014,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN; skb->mac_len = ETH_HLEN;
#ifdef CONFIG_MLX5_EN_TLS #ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
#endif #endif
if (lro_num_seg > 1) { if (lro_num_seg > 1) {
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/module.h> #include <linux/module.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h" #include "../../mlxfw/mlxfw.h"
#include "accel/tls.h"
enum { enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1, MCQS_IDENTIFIER_BOOT_IMG = 0x1,
...@@ -236,7 +237,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) ...@@ -236,7 +237,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err; return err;
} }
if (MLX5_CAP_GEN(dev, tls_tx)) { if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err) if (err)
return err; return err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment