Commit 3544c98a authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

net/tls: narrow down the critical area of device_offload_lock

On setsockopt path we need to hold device_offload_lock from
the moment we check netdev is up until the context is fully
ready to be added to the tls_device_list.

No need to hold it around the get_netdev_for_sock().
Change the code and remove the confusing comment.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 90962b48
...@@ -935,17 +935,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -935,17 +935,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
if (skb) if (skb)
TCP_SKB_CB(skb)->eor = 1; TCP_SKB_CB(skb)->eor = 1;
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
* NETDEV_DOWN and setsockopt.
*/
down_read(&device_offload_lock);
netdev = get_netdev_for_sock(sk); netdev = get_netdev_for_sock(sk);
if (!netdev) { if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__); pr_err_ratelimited("%s: netdev not found\n", __func__);
rc = -EINVAL; rc = -EINVAL;
goto release_lock; goto disable_cad;
} }
if (!(netdev->features & NETIF_F_HW_TLS_TX)) { if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
...@@ -956,10 +950,15 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -956,10 +950,15 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
/* Avoid offloading if the device is down /* Avoid offloading if the device is down
* We don't want to offload new flows after * We don't want to offload new flows after
* the NETDEV_DOWN event * the NETDEV_DOWN event
*
* device_offload_lock is taken in tls_devices's NETDEV_DOWN
* handler thus protecting from the device going down before
* ctx was added to tls_device_list.
*/ */
down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) { if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL; rc = -EINVAL;
goto release_netdev; goto release_lock;
} }
ctx->priv_ctx_tx = offload_ctx; ctx->priv_ctx_tx = offload_ctx;
...@@ -967,9 +966,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -967,9 +966,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
&ctx->crypto_send.info, &ctx->crypto_send.info,
tcp_sk(sk)->write_seq); tcp_sk(sk)->write_seq);
if (rc) if (rc)
goto release_netdev; goto release_lock;
tls_device_attach(ctx, sk, netdev); tls_device_attach(ctx, sk, netdev);
up_read(&device_offload_lock);
/* following this assignment tls_is_sk_tx_device_offloaded /* following this assignment tls_is_sk_tx_device_offloaded
* will return true and the context might be accessed * will return true and the context might be accessed
...@@ -977,14 +977,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ...@@ -977,14 +977,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
*/ */
smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
dev_put(netdev); dev_put(netdev);
up_read(&device_offload_lock);
return 0; return 0;
release_netdev:
dev_put(netdev);
release_lock: release_lock:
up_read(&device_offload_lock); up_read(&device_offload_lock);
release_netdev:
dev_put(netdev);
disable_cad:
clean_acked_data_disable(inet_csk(sk)); clean_acked_data_disable(inet_csk(sk));
crypto_free_aead(offload_ctx->aead_send); crypto_free_aead(offload_ctx->aead_send);
free_rec_seq: free_rec_seq:
...@@ -1008,17 +1008,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) ...@@ -1008,17 +1008,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
* NETDEV_DOWN and setsockopt.
*/
down_read(&device_offload_lock);
netdev = get_netdev_for_sock(sk); netdev = get_netdev_for_sock(sk);
if (!netdev) { if (!netdev) {
pr_err_ratelimited("%s: netdev not found\n", __func__); pr_err_ratelimited("%s: netdev not found\n", __func__);
rc = -EINVAL; return -EINVAL;
goto release_lock;
} }
if (!(netdev->features & NETIF_F_HW_TLS_RX)) { if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
...@@ -1029,16 +1022,21 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) ...@@ -1029,16 +1022,21 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
/* Avoid offloading if the device is down /* Avoid offloading if the device is down
* We don't want to offload new flows after * We don't want to offload new flows after
* the NETDEV_DOWN event * the NETDEV_DOWN event
*
* device_offload_lock is taken in tls_devices's NETDEV_DOWN
* handler thus protecting from the device going down before
* ctx was added to tls_device_list.
*/ */
down_read(&device_offload_lock);
if (!(netdev->flags & IFF_UP)) { if (!(netdev->flags & IFF_UP)) {
rc = -EINVAL; rc = -EINVAL;
goto release_netdev; goto release_lock;
} }
context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
if (!context) { if (!context) {
rc = -ENOMEM; rc = -ENOMEM;
goto release_netdev; goto release_lock;
} }
context->resync_nh_reset = 1; context->resync_nh_reset = 1;
...@@ -1066,10 +1064,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) ...@@ -1066,10 +1064,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
down_read(&device_offload_lock); down_read(&device_offload_lock);
release_ctx: release_ctx:
ctx->priv_ctx_rx = NULL; ctx->priv_ctx_rx = NULL;
release_netdev:
dev_put(netdev);
release_lock: release_lock:
up_read(&device_offload_lock); up_read(&device_offload_lock);
release_netdev:
dev_put(netdev);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment