Commit b5d9a834 authored by Dirk van der Merwe, committed by David S. Miller

net/tls: don't clear TX resync flag on error

Introduce a return code for the tls_dev_resync callback.

When the driver TX resync fails, the kernel can retry the resync
until it succeeds.  This prevents drivers from attempting to offload
TLS packets while the connection is known to be out of sync.

We don't worry about RX resync, since failed RX resyncs are retried
naturally as more encrypted records are received.
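
For illustration only (not part of the patch), here is a minimal
user-space C model of the new contract; all names in it are
hypothetical stand-ins, not taken from the kernel.  The resync
callback now returns 0 or a negative error, and the caller clears its
"resync scheduled" flag only on success, so a failed attempt is
simply retried later.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a driver's tls_dev_resync(): fail the
 * first attempt (e.g. the device mailbox is busy), then succeed. */
static int dev_resync(int attempt)
{
	return attempt == 0 ? -16 /* -EBUSY */ : 0;
}

/* Toy model of tls_device_resync_tx() after this patch: the
 * scheduled flag is cleared only when the driver reports success. */
static void resync_tx(bool *tx_sync_sched, int attempt)
{
	if (dev_resync(attempt))
		return;			/* keep flag set; retried later */
	*tx_sync_sched = false;		/* success: resync done */
}

int main(void)
{
	bool tx_sync_sched = true;	/* a TX resync has been scheduled */
	int attempt = 0;

	while (tx_sync_sched)
		resync_tx(&tx_sync_sched, attempt++);
	printf("resync succeeded after %d attempts\n", attempt);
	return 0;
}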
Signed-off-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 427545b3
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -160,9 +160,9 @@ static void mlx5e_tls_del(struct net_device *netdev,
 			 direction == TLS_OFFLOAD_CTX_DIR_TX);
 }
 
-static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
-			     u32 seq, u8 *rcd_sn_data,
-			     enum tls_offload_ctx_dir direction)
+static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+			    u32 seq, u8 *rcd_sn_data,
+			    enum tls_offload_ctx_dir direction)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -177,6 +177,8 @@ static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
 		      be64_to_cpu(rcd_sn));
 	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
 	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
+
+	return 0;
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -403,7 +403,7 @@ nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
 	nfp_net_tls_del_fw(nn, ntls->fw_handle);
 }
 
-static void
+static int
 nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
 {
@@ -412,11 +412,12 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	struct nfp_crypto_req_update *req;
 	struct sk_buff *skb;
 	gfp_t flags;
+	int err;
 
 	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
 	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
 	if (!skb)
-		return;
+		return -ENOMEM;
 
 	ntls = tls_driver_ctx(sk, direction);
 	req = (void *)skb->data;
@@ -428,13 +429,17 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
 	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-		nfp_net_tls_communicate_simple(nn, skb, "sync",
-					       NFP_CCM_TYPE_CRYPTO_UPDATE);
+		err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+						     NFP_CCM_TYPE_CRYPTO_UPDATE);
+		if (err)
+			return err;
 		ntls->next_seq = seq;
 	} else {
 		nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
 				  sizeof(struct nfp_crypto_reply_simple));
 	}
+
+	return 0;
 }
 
 static const struct tlsdev_ops nfp_net_tls_ops = {
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -304,9 +304,9 @@ struct tlsdev_ops {
 	void (*tls_dev_del)(struct net_device *netdev,
 			    struct tls_context *ctx,
 			    enum tls_offload_ctx_dir direction);
-	void (*tls_dev_resync)(struct net_device *netdev,
-			       struct sock *sk, u32 seq, u8 *rcd_sn,
-			       enum tls_offload_ctx_dir direction);
+	int (*tls_dev_resync)(struct net_device *netdev,
+			      struct sock *sk, u32 seq, u8 *rcd_sn,
+			      enum tls_offload_ctx_dir direction);
 };
 
 enum tls_offload_sync_type {
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -214,6 +214,7 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 {
 	struct net_device *netdev;
 	struct sk_buff *skb;
+	int err = 0;
 	u8 *rcd_sn;
 
 	skb = tcp_write_queue_tail(sk);
@@ -225,9 +226,12 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 	down_read(&device_offload_lock);
 	netdev = tls_ctx->netdev;
 	if (netdev)
-		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
-						   TLS_OFFLOAD_CTX_DIR_TX);
+		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+							 rcd_sn,
+							 TLS_OFFLOAD_CTX_DIR_TX);
 	up_read(&device_offload_lock);
+	if (err)
+		return;
 
 	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 }