drivers/net/wireguard/peer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

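/* Slab cache backing all struct wg_peer allocations, plus a monotonically
 * increasing counter used to hand out each peer's unique internal_id for
 * debug messages.
 */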
static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);

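/* Allocate and initialize a new peer and link it into the device's peer list
 * and pubkey hashtable. The caller must hold wg->device_update_lock; on
 * failure (allocation failure or the per-device peer limit being reached)
 * this returns ERR_PTR(-ENOMEM).
 */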
struct wg_peer *wg_peer_create(struct wg_device *wg,
                               const u8 public_key[NOISE_PUBLIC_KEY_LEN],
                               const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
        struct wg_peer *peer;
        int ret = -ENOMEM;

        lockdep_assert_held(&wg->device_update_lock);

        if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
                return ERR_PTR(ret);

        peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
        if (unlikely(!peer))
                return ERR_PTR(ret);
        if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
                goto err;

        peer->device = wg;
        wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
                                public_key, preshared_key, peer);
        peer->internal_id = atomic64_inc_return(&peer_counter);
        peer->serial_work_cpu = nr_cpumask_bits;
        wg_cookie_init(&peer->latest_cookie);
        wg_timers_init(peer);
        wg_cookie_checker_precompute_peer_keys(peer);
        spin_lock_init(&peer->keypairs.keypair_update_lock);
        INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
        INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
        wg_prev_queue_init(&peer->tx_queue);
        wg_prev_queue_init(&peer->rx_queue);
        rwlock_init(&peer->endpoint_lock);
        kref_init(&peer->refcount);
        skb_queue_head_init(&peer->staged_packet_queue);
        wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
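        /* This napi context is not tied to a real rx queue, so exclude it
         * from busy polling before registering it.
         */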
        set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
        netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
                       NAPI_POLL_WEIGHT);
        napi_enable(&peer->napi);
        list_add_tail(&peer->peer_list, &wg->peer_list);
        INIT_LIST_HEAD(&peer->allowedips_list);
        wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
        ++wg->num_peers;
        pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
        return peer;

err:
        kmem_cache_free(peer_cache, peer);
        return ERR_PTR(ret);
}

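/* Illustrative caller sketch only, not a copy of the in-tree caller
 * (set_peer() in netlink.c); the surrounding error handling here is an
 * assumption for the example:
 *
 *      mutex_lock(&wg->device_update_lock);
 *      peer = wg_peer_create(wg, public_key, preshared_key);
 *      if (IS_ERR(peer)) {
 *              ret = PTR_ERR(peer);
 *              goto out;
 *      }
 *      ...
 * out:
 *      mutex_unlock(&wg->device_update_lock);
 */

/* Take a reference to a peer found through an RCU-protected lookup structure,
 * returning NULL if the refcount has already dropped to zero. A typical
 * lookup pattern (sketch, assuming an RCU-protected entry holding a peer
 * pointer):
 *
 *      rcu_read_lock_bh();
 *      peer = wg_peer_get_maybe_zero(rcu_dereference_bh(entry->peer));
 *      rcu_read_unlock_bh();
 *      if (peer) {
 *              ... use peer ...
 *              wg_peer_put(peer);
 *      }
 */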
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
                         "Taking peer reference without holding the RCU read lock");
        if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
                return NULL;
        return peer;
}

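/* First half of peer removal: unlink the peer from all configuration-time
 * lookup structures and mark it dead. The caller must then synchronize_net()
 * before calling peer_remove_after_dead().
 */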
static void peer_make_dead(struct wg_peer *peer)
{
        /* Remove from configuration-time lookup structures. */
        list_del_init(&peer->peer_list);
        wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
                                     &peer->device->device_update_lock);
        wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

        /* Mark as dead, so that we don't allow jumping contexts after. */
        WRITE_ONCE(peer->is_dead, true);

        /* The caller must now synchronize_net() for this to take effect. */
}

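/* Second half of peer removal: runs only after peer_make_dead() and an RCU
 * grace period, and tears down keypairs, timers, queued work, and napi before
 * dropping the device's reference to the peer.
 */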
static void peer_remove_after_dead(struct wg_peer *peer)
{
        WARN_ON(!peer->is_dead);

        /* No more keypairs can be created for this peer, since is_dead protects
         * add_new_keypair, so we can now destroy existing ones.
         */
        wg_noise_keypairs_clear(&peer->keypairs);

        /* Destroy all ongoing timers that were in-flight at the beginning of
         * this function.
         */
        wg_timers_stop(peer);

        /* The transition between packet encryption/decryption queues isn't
         * guarded by is_dead, but each reference's life is strictly bounded by
         * two generations: once for parallel crypto and once for serial
         * ingestion, so we can simply flush twice, and be sure that we no
         * longer have references inside these queues.
         */

        /* a) For encrypt/decrypt. */
        flush_workqueue(peer->device->packet_crypt_wq);
        /* b.1) For send (but not receive, since that's napi). */
        flush_workqueue(peer->device->packet_crypt_wq);
        /* b.2.1) For receive (but not send, since that's wq). */
        napi_disable(&peer->napi);
        /* b.2.2) It's now safe to remove the napi struct, which must be done
         * here from process context.
         */
        netif_napi_del(&peer->napi);

        /* Ensure any workstructs we own (like transmit_handshake_work or
         * clear_peer_work) are no longer in use.
         */
        flush_workqueue(peer->device->handshake_send_wq);

        /* After the above flushes, a peer might still be active in a few
         * different contexts: 1) from xmit(), before hitting is_dead and
         * returning, 2) from wg_packet_consume_data(), before hitting is_dead
         * and returning, 3) from wg_receive_handshake_packet() after a point
         * where it has processed an incoming handshake packet, but where
         * all calls to pass it off to timers fail because of is_dead. We won't
         * have new references in (1) eventually, because we're removed from
         * allowedips; we won't have new references in (2) eventually, because
         * wg_index_hashtable_lookup will always return NULL, since we removed
         * all existing keypairs and no more can be created; we won't have new
         * references in (3) eventually, because we're removed from the pubkey
         * hash table, which allows for a maximum of one handshake response,
         * via the still-uncleared index hashtable entry, but not more than one,
         * and in wg_cookie_message_consume, the lookup eventually gets a peer
         * with a refcount of zero, so no new reference is taken.
         */

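        /* Drop the device's long-held reference, established by kref_init()
         * in wg_peer_create(); the peer is actually freed, via kref_release()
         * and then call_rcu(), once all remaining transient references are
         * also put.
         */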
        --peer->device->num_peers;
        wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and will
 * not pass their reference on to another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
        if (unlikely(!peer))
                return;
        lockdep_assert_held(&peer->device->device_update_lock);

        peer_make_dead(peer);
        synchronize_net();
        peer_remove_after_dead(peer);
}

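/* Remove every peer on the device, marking them all dead first so that only a
 * single synchronize_net() grace period is needed before tearing them down.
 */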
void wg_peer_remove_all(struct wg_device *wg)
{
        struct wg_peer *peer, *temp;
        LIST_HEAD(dead_peers);

        lockdep_assert_held(&wg->device_update_lock);

        /* Avoid having to traverse individually for each one. */
        wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

        list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
                peer_make_dead(peer);
                list_add_tail(&peer->peer_list, &dead_peers);
        }
        synchronize_net();
        list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
                peer_remove_after_dead(peer);
}

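/* Destruction happens in two stages: kref_release() runs when the last
 * reference is dropped and unlinks the remaining runtime state, then
 * rcu_release() runs after a further RCU grace period to zero and free the
 * memory, so that concurrent RCU readers can never observe freed memory.
 */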
static void rcu_release(struct rcu_head *rcu)
{
        struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

        dst_cache_destroy(&peer->endpoint_cache);
        WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));

        /* The final zeroing takes care of clearing any remaining handshake key
         * material and other potentially sensitive information.
         */
        memzero_explicit(peer, sizeof(*peer));
        kmem_cache_free(peer_cache, peer);
}

static void kref_release(struct kref *refcount)
{
        struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

        pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
                 peer->device->dev->name, peer->internal_id,
                 &peer->endpoint.addr);

        /* Remove ourselves from dynamic runtime lookup structures, now that
         * the last reference is gone.
         */
        wg_index_hashtable_remove(peer->device->index_hashtable,
                                  &peer->handshake.entry);

        /* Remove any lingering packets that didn't have a chance to be
         * transmitted.
         */
        wg_packet_purge_staged_packets(peer);

        /* Free the memory used. */
        call_rcu(&peer->rcu, rcu_release);
}

void wg_peer_put(struct wg_peer *peer)
{
        if (unlikely(!peer))
                return;
        kref_put(&peer->refcount, kref_release);
}

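/* Create and destroy the slab cache backing all peer allocations; called once
 * at module load and unload, respectively.
 */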
int __init wg_peer_init(void)
{
        peer_cache = KMEM_CACHE(wg_peer, 0);
        return peer_cache ? 0 : -ENOMEM;
}

void wg_peer_uninit(void)
{
        kmem_cache_destroy(peer_cache);
}