Commit 9749d62b authored by Jakub Kicinski, committed by Greg Kroah-Hartman

net/tls: replace the sleeping lock around RX resync with a bit lock

[ Upstream commit e52972c1 ]

Commit 38030d7c ("net/tls: avoid NULL-deref on resync during device removal")
tried to fix a potential NULL-dereference by taking the
context rwsem.  Unfortunately the RX resync may get called
from soft IRQ, so we can't use the rwsem to protect from
the device disappearing.  Because we are guaranteed there
can be only one resync at a time (it's called from strparser)
use a bit to indicate resync is busy and make device
removal wait for the bit to get cleared.

Note that there is a leftover "flags" field in struct
tls_context already.

Fixes: 4799ac81 ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e2465055
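
For illustration only, here is a minimal userspace sketch of the bit-lock
pattern this patch introduces, written with C11 atomics standing in for the
kernel's test_and_set_bit()/clear_bit_unlock()/usleep_range(). The names
(struct ctx, resync_rx, device_down, RX_SYNC_RUNNING's placement in a plain
flags word) are made up for the sketch, and seq_cst atomics conservatively
stand in for the kernel's explicit memory barriers:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Stand-in for the relevant parts of struct tls_context. */
	struct ctx {
		_Atomic(void *) netdev;	/* models READ_ONCE/WRITE_ONCE on ctx->netdev */
		atomic_uint flags;	/* bit 0 models TLS_RX_SYNC_RUNNING */
	};

	#define RX_SYNC_RUNNING 0x1u

	/* Resync side (soft IRQ in the kernel): may not sleep, so it only
	 * trylocks the bit and bails out if the bit is already set. */
	static void resync_rx(struct ctx *c)
	{
		/* models test_and_set_bit(): fetch_or returns the old bits */
		if (atomic_fetch_or(&c->flags, RX_SYNC_RUNNING) & RX_SYNC_RUNNING)
			return;		/* only one resync runs at a time */

		void *dev = atomic_load(&c->netdev);
		if (dev)
			puts("resync against live device"); /* tls_dev_resync_rx() */

		/* models clear_bit_unlock(): release the bit */
		atomic_fetch_and(&c->flags, ~RX_SYNC_RUNNING);
	}

	/* Removal side: hide the device first, then wait the bit out. */
	static void device_down(struct ctx *c)
	{
		atomic_store(&c->netdev, NULL);
		while (atomic_load(&c->flags) & RX_SYNC_RUNNING)
			;	/* the kernel sleeps here: usleep_range(10, 200) */
	}

	int main(void)
	{
		struct ctx c = { .flags = 0 };
		atomic_store(&c.netdev, &c);	/* pretend a device is attached */

		resync_rx(&c);		/* takes the bit, sees the device */
		device_down(&c);	/* clears pointer, drains any resync */
		resync_rx(&c);		/* takes the bit but finds no device */
		return 0;
	}

The property mirrored here is the one the commit message relies on: the
resync path never sleeps or blocks, while the removal path first makes the
device unreachable and then waits for any in-flight resync to drain.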
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -199,6 +199,10 @@ struct tls_offload_context_tx {
 	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
 	 TLS_DRIVER_STATE_SIZE)
 
+enum tls_context_flags {
+	TLS_RX_SYNC_RUNNING = 0,
+};
+
 struct cipher_context {
 	char *iv;
 	char *rec_seq;
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -570,10 +570,22 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 	}
 }
 
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+				 struct sock *sk, u32 seq, u64 rcd_sn)
+{
+	struct net_device *netdev;
+
+	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+		return;
+	netdev = READ_ONCE(tls_ctx->netdev);
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct net_device *netdev = tls_ctx->netdev;
 	struct tls_offload_context_rx *rx_ctx;
 	u32 is_req_pending;
 	s64 resync_req;
@@ -588,10 +600,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 	is_req_pending = resync_req;
 
 	if (unlikely(is_req_pending) && req_seq == seq &&
-	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
-		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
-						      seq + TLS_HEADER_SIZE - 1,
-						      rcd_sn);
+	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+		seq += TLS_HEADER_SIZE - 1;
+		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+	}
 }
 
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
@@ -981,7 +993,10 @@ static int tls_device_down(struct net_device *netdev)
 		if (ctx->rx_conf == TLS_HW)
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_RX);
-		ctx->netdev = NULL;
+		WRITE_ONCE(ctx->netdev, NULL);
+		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+			usleep_range(10, 200);
 		dev_put(netdev);
 		list_del_init(&ctx->list);
 