drivers/net/wireguard/peer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

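/* Source of each peer's monotonically increasing internal_id, used in debug
 * messages to identify the peer.
 */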
static atomic64_t peer_counter = ATOMIC64_INIT(0);

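/* Allocates a new peer, initializes its handshake state, timers, queues and
 * napi context, and links it into the device's peer list and pubkey hash
 * table. The caller must hold wg->device_update_lock (asserted below); in the
 * driver this is reached from the netlink configuration path. Returns
 * ERR_PTR(-ENOMEM) if the per-device peer limit is hit or allocation fails.
 */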
struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
		goto err;

	peer->device = wg;
	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	peer->internal_id = atomic64_inc_return(&peer_counter);
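	/* nr_cpumask_bits is the "no CPU chosen yet" sentinel; an online CPU is
	 * picked when the first packet is queued for serialized work.
	 */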
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
	wg_prev_queue_init(&peer->tx_queue);
	wg_prev_queue_init(&peer->rx_queue);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err:
	kfree(peer);
	return ERR_PTR(ret);
}

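/* Takes a reference on @peer under the RCU BH read lock. Returns NULL if
 * @peer is NULL or if its refcount has already hit zero, i.e. the peer is
 * being torn down and may not be used.
 */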
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}

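/* First phase of peer teardown: unlink the peer from the structures used to
 * find it at configuration time and mark it dead so that in-flight contexts
 * stop taking new references. The caller must then wait for an RCU grace
 * period before calling peer_remove_after_dead().
 */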
static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_rcu() for this to take effect. */
}

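/* Second phase of peer teardown: runs only after peer_make_dead() and the
 * subsequent RCU grace period, once no new references to the peer can be
 * created, and finally drops the reference installed by wg_peer_create().
 */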
static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);

	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) no longer are in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_rcu();
	peer_remove_after_dead(peer);
}

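/* Removes every peer on the device while paying the synchronize_rcu() cost
 * only once: all peers are first marked dead and collected on a private list,
 * one grace period passes, and then each dead peer is reaped. The allowedips
 * structure is freed wholesale up front rather than per peer.
 */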
void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_rcu();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}

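/* Final free, run as an RCU callback queued from kref_release(); by the time
 * this executes, a grace period has elapsed and no RCU reader can still hold
 * a pointer to the peer.
 */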
static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	kfree_sensitive(peer);
}

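/* Called by kref_put() in wg_peer_put() when the last reference is dropped.
 * Runtime lookup structures are cleaned up here, and the actual freeing is
 * deferred to rcu_release() via call_rcu().
 */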
static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourselves from dynamic runtime lookup structures, now that
	 * the last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}

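/* Drops one reference; safe to call with a NULL peer. The final put triggers
 * kref_release() above.
 */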
void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}