net/tls: narrow down the critical area of device_offload_lock
authorJakub Kicinski <jakub.kicinski@netronome.com>
Tue, 3 Sep 2019 04:31:04 +0000 (21:31 -0700)
committerDavid S. Miller <davem@davemloft.net>
Thu, 5 Sep 2019 07:49:49 +0000 (09:49 +0200)
On the setsockopt path we need to hold device_offload_lock from
the moment we check that the netdev is up until the context is
fully ready to be added to the tls_device_list.

No need to hold it around the get_netdev_for_sock().
Change the code and remove the confusing comment.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/tls/tls_device.c

index 2cd7318..9e1bec1 100644 (file)
@@ -935,17 +935,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        if (skb)
                TCP_SKB_CB(skb)->eor = 1;
 
-       /* We support starting offload on multiple sockets
-        * concurrently, so we only need a read lock here.
-        * This lock must precede get_netdev_for_sock to prevent races between
-        * NETDEV_DOWN and setsockopt.
-        */
-       down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
                rc = -EINVAL;
-               goto release_lock;
+               goto disable_cad;
        }
 
        if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
@@ -956,10 +950,15 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
+        *
+        * device_offload_lock is taken in tls_device's NETDEV_DOWN
+        * handler, thus protecting against the device going down before
+        * ctx was added to tls_device_list.
         */
+       down_read(&device_offload_lock);
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
-               goto release_netdev;
+               goto release_lock;
        }
 
        ctx->priv_ctx_tx = offload_ctx;
@@ -967,9 +966,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                                             &ctx->crypto_send.info,
                                             tcp_sk(sk)->write_seq);
        if (rc)
-               goto release_netdev;
+               goto release_lock;
 
        tls_device_attach(ctx, sk, netdev);
+       up_read(&device_offload_lock);
 
        /* following this assignment tls_is_sk_tx_device_offloaded
         * will return true and the context might be accessed
@@ -977,14 +977,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
         */
        smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
        dev_put(netdev);
-       up_read(&device_offload_lock);
 
        return 0;
 
-release_netdev:
-       dev_put(netdev);
 release_lock:
        up_read(&device_offload_lock);
+release_netdev:
+       dev_put(netdev);
+disable_cad:
        clean_acked_data_disable(inet_csk(sk));
        crypto_free_aead(offload_ctx->aead_send);
 free_rec_seq:
@@ -1008,17 +1008,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
        if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
                return -EOPNOTSUPP;
 
-       /* We support starting offload on multiple sockets
-        * concurrently, so we only need a read lock here.
-        * This lock must precede get_netdev_for_sock to prevent races between
-        * NETDEV_DOWN and setsockopt.
-        */
-       down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
-               rc = -EINVAL;
-               goto release_lock;
+               return -EINVAL;
        }
 
        if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
@@ -1029,16 +1022,21 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
+        *
+        * device_offload_lock is taken in tls_device's NETDEV_DOWN
+        * handler, thus protecting against the device going down before
+        * ctx was added to tls_device_list.
         */
+       down_read(&device_offload_lock);
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
-               goto release_netdev;
+               goto release_lock;
        }
 
        context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
        if (!context) {
                rc = -ENOMEM;
-               goto release_netdev;
+               goto release_lock;
        }
        context->resync_nh_reset = 1;
 
@@ -1066,10 +1064,10 @@ free_sw_resources:
        down_read(&device_offload_lock);
 release_ctx:
        ctx->priv_ctx_rx = NULL;
-release_netdev:
-       dev_put(netdev);
 release_lock:
        up_read(&device_offload_lock);
+release_netdev:
+       dev_put(netdev);
        return rc;
 }