Commit aec79619 authored by Jakub Kicinski, committed by David S. Miller

tls: fix race between async notify and socket close

The submitting thread (the one that called recvmsg/sendmsg)
may exit as soon as the async crypto handler calls complete(),
so any code past that point risks touching already freed data.

Try to avoid the locking and extra flags altogether.
Have the main thread hold an extra reference; this way
we can depend solely on the atomic ref counter for
synchronization.

Don't futz with reiniting the completion, either; we now
tightly control when the completion fires.
Reported-by: valis <sec@valis.email>
Fixes: 0cada332 ("net/tls: fix race condition causing kernel panic")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c57ca512
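For readers outside the tree, here is a minimal userspace model of the scheme this patch moves to. It is a sketch only: struct pending_ctx, op_submit(), op_done() and op_wait_all() are hypothetical names, and a pthread mutex/condvar pair stands in for the kernel's completion API. The key idea carried over from the patch is the bias: the counter is initialized to 1, so async handlers on their own can never drive it to zero; only a waiter that drops its own reference can be completed against.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical userspace model of the patch's scheme -- not kernel code.
 * The counter rests at 1 (the waiter's own reference), mirroring
 * encrypt_pending/decrypt_pending after this commit.
 */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

struct pending_ctx {
	atomic_int pending;		/* rests at 1, like the kernel counters */
	struct completion wait;
};

/* atomic_dec_and_test() analogue: true iff the decrement reached zero */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;
}

/* Submit side: take a reference before kicking off async work, as
 * tls_do_encryption()/tls_do_decryption() do with atomic_inc().
 */
static void op_submit(struct pending_ctx *ctx)
{
	atomic_fetch_add(&ctx->pending, 1);
}

/* Async handler: drop the reference; fire the completion only if the
 * waiter has already dropped its bias, as in tls_{en,de}crypt_done().
 */
static void op_done(struct pending_ctx *ctx)
{
	if (dec_and_test(&ctx->pending)) {
		pthread_mutex_lock(&ctx->wait.lock);
		ctx->wait.done = true;
		pthread_cond_signal(&ctx->wait.cond);
		pthread_mutex_unlock(&ctx->wait.lock);
	}
}

/* Waiter: drop the bias, sleep only if handlers are still in flight,
 * then restore the bias, as in tls_{en,de}crypt_async_wait().
 */
static void op_wait_all(struct pending_ctx *ctx)
{
	if (!dec_and_test(&ctx->pending)) {
		pthread_mutex_lock(&ctx->wait.lock);
		while (!ctx->wait.done)
			pthread_cond_wait(&ctx->wait.cond, &ctx->wait.lock);
		ctx->wait.done = false;		/* arm for the next wait */
		pthread_mutex_unlock(&ctx->wait.lock);
	}
	atomic_fetch_add(&ctx->pending, 1);
}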
@@ -97,9 +97,6 @@ struct tls_sw_context_tx {
 	struct tls_rec *open_rec;
 	struct list_head tx_list;
 	atomic_t encrypt_pending;
-	/* protect crypto_wait with encrypt_pending */
-	spinlock_t encrypt_compl_lock;
-	int async_notify;
 	u8 async_capable:1;

 #define BIT_TX_SCHEDULED	0
@@ -136,8 +133,6 @@ struct tls_sw_context_rx {
 	struct tls_strparser strp;

 	atomic_t decrypt_pending;
-	/* protect crypto_wait with decrypt_pending*/
-	spinlock_t decrypt_compl_lock;
 	struct sk_buff_head async_hold;
 	struct wait_queue_head wq;
 };
@@ -224,22 +224,15 @@ static void tls_decrypt_done(void *data, int err)
 	kfree(aead_req);

-	spin_lock_bh(&ctx->decrypt_compl_lock);
-	if (!atomic_dec_return(&ctx->decrypt_pending))
+	if (atomic_dec_and_test(&ctx->decrypt_pending))
 		complete(&ctx->async_wait.completion);
-	spin_unlock_bh(&ctx->decrypt_compl_lock);
 }

 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
 {
-	int pending;
-
-	spin_lock_bh(&ctx->decrypt_compl_lock);
-	reinit_completion(&ctx->async_wait.completion);
-	pending = atomic_read(&ctx->decrypt_pending);
-	spin_unlock_bh(&ctx->decrypt_compl_lock);
-
-	if (pending)
+	if (!atomic_dec_and_test(&ctx->decrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+	atomic_inc(&ctx->decrypt_pending);

 	return ctx->async_wait.err;
 }
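Building on the hypothetical model above, usage pairs up the same way tls_do_decryption() pairs with tls_decrypt_async_wait() in this hunk: take a reference, kick off the work, then drop-wait-restore. A worker thread plays the async crypto handler here:

#include <stdio.h>
/* (uses struct pending_ctx and friends from the sketch above) */

static void *fake_handler(void *arg)
{
	op_done(arg);			/* handler drops its reference */
	return NULL;
}

int main(void)
{
	struct pending_ctx ctx = {
		.pending = 1,		/* the bias, as set by init_ctx_rx() */
		.wait = { .lock = PTHREAD_MUTEX_INITIALIZER,
			  .cond = PTHREAD_COND_INITIALIZER,
			  .done = false },
	};
	pthread_t t;

	op_submit(&ctx);		/* as in tls_do_decryption() */
	pthread_create(&t, NULL, fake_handler, &ctx);
	op_wait_all(&ctx);		/* as in tls_decrypt_async_wait() */
	pthread_join(&t, NULL);
	printf("pending restored to %d\n", atomic_load(&ctx.pending));
	return 0;
}

Whatever order fake_handler() and op_wait_all() run in, the counter can only reach zero after the waiter has dropped its bias, which is exactly why the handler-side complete() can no longer fire against a caller that has already left.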
@@ -267,6 +260,7 @@ static int tls_do_decryption(struct sock *sk,
 		aead_request_set_callback(aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  tls_decrypt_done, aead_req);
+		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
 		atomic_inc(&ctx->decrypt_pending);
 	} else {
 		aead_request_set_callback(aead_req,
@@ -455,7 +449,6 @@ static void tls_encrypt_done(void *data, int err)
 	struct sk_msg *msg_en;
 	bool ready = false;
 	struct sock *sk;
-	int pending;

 	msg_en = &rec->msg_encrypted;
@@ -494,12 +487,8 @@ static void tls_encrypt_done(void *data, int err)
 		ready = true;
 	}

-	spin_lock_bh(&ctx->encrypt_compl_lock);
-	pending = atomic_dec_return(&ctx->encrypt_pending);
-
-	if (!pending && ctx->async_notify)
+	if (atomic_dec_and_test(&ctx->encrypt_pending))
 		complete(&ctx->async_wait.completion);
-	spin_unlock_bh(&ctx->encrypt_compl_lock);

 	if (!ready)
 		return;
@@ -511,22 +500,9 @@ static void tls_encrypt_done(void *data, int err)
 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
 {
-	int pending;
-
-	spin_lock_bh(&ctx->encrypt_compl_lock);
-	ctx->async_notify = true;
-	pending = atomic_read(&ctx->encrypt_pending);
-	spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-	if (pending)
+	if (!atomic_dec_and_test(&ctx->encrypt_pending))
 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-	else
-		reinit_completion(&ctx->async_wait.completion);
-
-	/* There can be no concurrent accesses, since we have no
-	 * pending encrypt operations
-	 */
-	WRITE_ONCE(ctx->async_notify, false);
+	atomic_inc(&ctx->encrypt_pending);

 	return ctx->async_wait.err;
 }
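A design note on the tx side, reading from the hunks above rather than from the commit message: the old code needed the async_notify flag because encrypt completions can arrive when nobody is waiting (sendmsg may have returned already), and an unconditional complete() would have left the completion pre-signalled for a future waiter; hence the reinit_completion() and the lock guarding the flag. With the counter biased at 1, a handler's atomic_dec_and_test() can only reach zero after a waiter has dropped the bias in tls_encrypt_async_wait(), so complete() fires at most once per wait and neither the flag nor the reinit is needed.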
@@ -577,6 +553,7 @@ static int tls_do_encryption(struct sock *sk,
 	/* Add the record in tx_list */
 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
 	atomic_inc(&ctx->encrypt_pending);

 	rc = crypto_aead_encrypt(aead_req);
@@ -2601,7 +2578,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
 	}

 	crypto_init_wait(&sw_ctx_tx->async_wait);
-	spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
 	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
 	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
 	sw_ctx_tx->tx_work.sk = sk;
@@ -2622,7 +2599,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
 	}

 	crypto_init_wait(&sw_ctx_rx->async_wait);
-	spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
 	init_waitqueue_head(&sw_ctx_rx->wq);
 	skb_queue_head_init(&sw_ctx_rx->rx_list);
 	skb_queue_head_init(&sw_ctx_rx->async_hold);