drivers/net/wireguard/send.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"

#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>

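/* Construct and transmit a handshake initiation message. Rate limited so
 * that at most one initiation goes out per REKEY_TIMEOUT interval.
 */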
static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
        struct message_handshake_initiation packet;

        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT))
                return; /* This function is rate limited. */

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                wg_timers_any_authenticated_packet_traversal(peer);
                wg_timers_any_authenticated_packet_sent(peer);
                atomic64_set(&peer->last_sent_handshake,
                             ktime_get_coarse_boottime_ns());
                wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
                                              HANDSHAKE_DSCP);
                wg_timers_handshake_initiated(peer);
        }
}

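/* Workqueue callback behind peer->transmit_handshake_work; sends the
 * initiation and drops the peer reference taken when the work was queued.
 */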
void wg_packet_handshake_send_worker(struct work_struct *work)
{
        struct wg_peer *peer = container_of(work, struct wg_peer,
                                            transmit_handshake_work);

        wg_packet_send_handshake_initiation(peer);
        wg_peer_put(peer);
}

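/* Queue a handshake initiation on the peer's handshake_send_wq, holding a
 * peer reference for the lifetime of the work item. A non-retry call also
 * resets the timer subsystem's retry-attempt counter.
 */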
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry)
{
        if (!is_retry)
                peer->timer_handshake_attempts = 0;

        rcu_read_lock_bh();
        /* We check last_sent_handshake here in addition to the check inside
         * the function we're queueing up, so that we don't queue things if
         * not strictly necessary:
         */
        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT) ||
                        unlikely(READ_ONCE(peer->is_dead)))
                goto out;

        wg_peer_get(peer);
        /* Queues up calling wg_packet_handshake_send_worker(), where we do a
         * wg_peer_put(peer) after:
         */
        if (!queue_work(peer->device->handshake_send_wq,
                        &peer->transmit_handshake_work))
                /* If the work was already queued, we want to drop the
                 * extra reference:
                 */
                wg_peer_put(peer);
out:
        rcu_read_unlock_bh();
}

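/* Construct and transmit a handshake response. Because the responder has
 * everything it needs once the response is created, the transport session
 * keys are derived immediately on success.
 */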
void wg_packet_send_handshake_response(struct wg_peer *peer)
{
        struct message_handshake_response packet;

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                if (wg_noise_handshake_begin_session(&peer->handshake,
                                                     &peer->keypairs)) {
                        wg_timers_session_derived(peer);
                        wg_timers_any_authenticated_packet_traversal(peer);
                        wg_timers_any_authenticated_packet_sent(peer);
                        atomic64_set(&peer->last_sent_handshake,
                                     ktime_get_coarse_boottime_ns());
                        wg_socket_send_buffer_to_peer(peer, &packet,
                                                      sizeof(packet),
                                                      HANDSHAKE_DSCP);
                }
        }
}

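/* Reply to a handshake message that was denied (e.g. under load) with a
 * cookie reply, which the initiator must mix into the MACs of its next
 * handshake message.
 */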
void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index)
{
        struct message_handshake_cookie packet;

        net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
                                wg->dev->name, initiating_skb);
        wg_cookie_message_create(&packet, initiating_skb, sender_index,
                                 &wg->cookie_checker);
        wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
                                              sizeof(packet));
}

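/* Initiate a new handshake when the current sending key is nearing the end
 * of its life, either by message count (REKEY_AFTER_MESSAGES) or, if we
 * were the initiator, by age (REKEY_AFTER_TIME).
 */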
static void keep_key_fresh(struct wg_peer *peer)
{
        struct noise_keypair *keypair;
        bool send = false;

        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
        if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
            (unlikely(atomic64_read(&keypair->sending.counter.counter) >
                      REKEY_AFTER_MESSAGES) ||
             (keypair->i_am_the_initiator &&
              unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
                                                REKEY_AFTER_TIME)))))
                send = true;
        rcu_read_unlock_bh();

        if (send)
                wg_packet_send_queued_handshake_initiation(peer, false);
}

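/* Return how many bytes of zero padding bring the packet up to a multiple
 * of MESSAGE_PADDING_MULTIPLE, clamped so a padded packet never exceeds the
 * device MTU.
 */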
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
        unsigned int last_unit, padded_size;

        /* Guard against a division by zero below for devices with an MTU of
         * zero. With no MTU there is nothing to clamp against, so just round
         * up to the next padding multiple.
         */
        if (unlikely(!PACKET_CB(skb)->mtu))
                return ALIGN(skb->len, MESSAGE_PADDING_MULTIPLE) - skb->len;

        /* We do this modulo business with the MTU, just in case the networking
         * layer gives us a packet that's bigger than the MTU. In that case, we
         * wouldn't want the final subtraction to overflow in the case of the
         * padded_size being clamped.
         */
        last_unit = skb->len % PACKET_CB(skb)->mtu;
        padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);

        if (padded_size > PACKET_CB(skb)->mtu)
                padded_size = PACKET_CB(skb)->mtu;
        return padded_size - last_unit;
}

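/* Encrypt one skb in place: zero-pad the plaintext, prepend the data
 * message header, reserve room for the authentication tag, and run
 * ChaCha20-Poly1305 over the resulting scatter-gather list. Returns false
 * if the skb couldn't be prepared.
 */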
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
        unsigned int padding_len, plaintext_len, trailer_len;
        struct scatterlist sg[MAX_SKB_FRAGS + 8];
        struct message_data *header;
        struct sk_buff *trailer;
        int num_frags;

        /* Calculate lengths. */
        padding_len = calculate_skb_padding(skb);
        trailer_len = padding_len + noise_encrypted_len(0);
        plaintext_len = skb->len + padding_len;

        /* Expand data section to have room for padding and auth tag. */
        num_frags = skb_cow_data(skb, trailer_len, &trailer);
        if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
                return false;

        /* Set the padding to zeros, and make sure it and the auth tag are part
         * of the skb.
         */
        memset(skb_tail_pointer(trailer), 0, padding_len);

        /* Expand head section to have room for our header and the network
         * stack's headers.
         */
        if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
                return false;

        /* Finalize checksum calculation for the inner packet, if required. */
        if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
                     skb_checksum_help(skb)))
                return false;

        /* Only after checksumming can we safely add on the padding at the end
         * and the header.
         */
        skb_set_inner_network_header(skb, 0);
        header = (struct message_data *)skb_push(skb, sizeof(*header));
        header->header.type = cpu_to_le32(MESSAGE_DATA);
        header->key_idx = keypair->remote_index;
        header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
        pskb_put(skb, trailer, trailer_len);

        /* Now we can encrypt the scatter-gather segments. */
        sg_init_table(sg, num_frags);
        if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
                         noise_encrypted_len(plaintext_len)) <= 0)
                return false;
        return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
                                                   PACKET_CB(skb)->nonce,
                                                   keypair->sending.key);
}

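/* Send a keepalive: if nothing is already staged, stage a zero-length
 * packet, then flush the staged queue so the keepalive travels the normal
 * encrypted data path.
 */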
void wg_packet_send_keepalive(struct wg_peer *peer)
{
        struct sk_buff *skb;

        if (skb_queue_empty(&peer->staged_packet_queue)) {
                skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
                                GFP_ATOMIC);
                if (unlikely(!skb))
                        return;
                skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
                skb->dev = peer->device->dev;
                PACKET_CB(skb)->mtu = skb->dev->mtu;
                skb_queue_tail(&peer->staged_packet_queue, skb);
                net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
                                    peer->device->dev->name, peer->internal_id,
                                    &peer->endpoint.addr);
        }

        wg_packet_send_staged_packets(peer);
}

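/* Transmit a freshly encrypted bundle and poke the timer machinery. A
 * keepalive doesn't count as data for the data-sent timers, hence the
 * message_data_len(0) check.
 */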
static void wg_packet_create_data_done(struct sk_buff *first,
                                       struct wg_peer *peer)
{
        struct sk_buff *skb, *next;
        bool is_keepalive, data_sent = false;

        wg_timers_any_authenticated_packet_traversal(peer);
        wg_timers_any_authenticated_packet_sent(peer);
        skb_list_walk_safe(first, skb, next) {
                is_keepalive = skb->len == message_data_len(0);
                if (likely(!wg_socket_send_skb_to_peer(peer, skb,
                                PACKET_CB(skb)->ds) && !is_keepalive))
                        data_sent = true;
        }

        if (likely(data_sent))
                wg_timers_data_sent(peer);

        keep_key_fresh(peer);
}

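/* Serial transmit worker: pop bundles off the peer's tx ring, in order, as
 * soon as the encryption workers have marked them CRYPTED (or DEAD), so
 * packets leave in the same order they were queued.
 */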
void wg_packet_tx_worker(struct work_struct *work)
{
        struct crypt_queue *queue = container_of(work, struct crypt_queue,
                                                 work);
        struct noise_keypair *keypair;
        enum packet_state state;
        struct sk_buff *first;
        struct wg_peer *peer;

        while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
               (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
                       PACKET_STATE_UNCRYPTED) {
                __ptr_ring_discard_one(&queue->ring);
                peer = PACKET_PEER(first);
                keypair = PACKET_CB(first)->keypair;

                if (likely(state == PACKET_STATE_CRYPTED))
                        wg_packet_create_data_done(first, peer);
                else
                        kfree_skb_list(first);

                wg_noise_keypair_put(keypair, false);
                wg_peer_put(peer);
        }
}

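/* Parallel encryption worker, running on the per-CPU packet_crypt_wq:
 * encrypt each skb of a bundle with the keypair chosen at staging time,
 * then publish the bundle's state so the serial tx worker can transmit it.
 */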
void wg_packet_encrypt_worker(struct work_struct *work)
{
        struct crypt_queue *queue = container_of(work, struct multicore_worker,
                                                 work)->ptr;
        struct sk_buff *first, *skb, *next;

        while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                enum packet_state state = PACKET_STATE_CRYPTED;

                skb_list_walk_safe(first, skb, next) {
                        if (likely(encrypt_packet(skb,
                                        PACKET_CB(first)->keypair))) {
                                wg_reset_packet(skb);
                        } else {
                                state = PACKET_STATE_DEAD;
                                break;
                        }
                }
                wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
                                          state);
        }
}

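/* Hand a bundle of nonce-stamped packets to the device-wide encrypt queue
 * and the peer's tx queue. On -EPIPE the bundle is marked DEAD and the tx
 * worker frees it; on any other failure we clean up here ourselves.
 */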
static void wg_packet_create_data(struct sk_buff *first)
{
        struct wg_peer *peer = PACKET_PEER(first);
        struct wg_device *wg = peer->device;
        int ret = -EINVAL;

        rcu_read_lock_bh();
        if (unlikely(READ_ONCE(peer->is_dead)))
                goto err;

        ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
                                                   &peer->tx_queue, first,
                                                   wg->packet_crypt_wq,
                                                   &wg->encrypt_queue.last_cpu);
        if (unlikely(ret == -EPIPE))
                wg_queue_enqueue_per_peer(&peer->tx_queue, first,
                                          PACKET_STATE_DEAD);
err:
        rcu_read_unlock_bh();
        if (likely(!ret || ret == -EPIPE))
                return;
        wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
        wg_peer_put(peer);
        kfree_skb_list(first);
}

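/* Drop everything in the staged queue, accounting the dropped packets as
 * tx_dropped on the net device.
 */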
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
        spin_lock_bh(&peer->staged_packet_queue.lock);
        peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
        __skb_queue_purge(&peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
}

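/* Main transmit entry point: splice the staged queue into a local list,
 * attach a valid keypair and a fresh nonce to every packet, and kick off
 * encryption. If no usable key exists, the packets are re-staged and a new
 * handshake is initiated.
 */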
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
        struct noise_symmetric_key *key;
        struct noise_keypair *keypair;
        struct sk_buff_head packets;
        struct sk_buff *skb;

        /* Steal the current queue into our local one. */
        __skb_queue_head_init(&packets);
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice_init(&peer->staged_packet_queue, &packets);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
        if (unlikely(skb_queue_empty(&packets)))
                return;

        /* First we make sure we have a valid reference to a valid key. */
        rcu_read_lock_bh();
        keypair = wg_noise_keypair_get(
                rcu_dereference_bh(peer->keypairs.current_keypair));
        rcu_read_unlock_bh();
        if (unlikely(!keypair))
                goto out_nokey;
        key = &keypair->sending;
        if (unlikely(!READ_ONCE(key->is_valid)))
                goto out_nokey;
        if (unlikely(wg_birthdate_has_expired(key->birthdate,
                                              REJECT_AFTER_TIME)))
                goto out_invalid;

        /* After we know we have a somewhat valid key, we now try to assign
         * nonces to all of the packets in the queue. If we can't assign nonces
         * for all of them, we just consider it a failure and wait for the next
         * handshake.
         */
        skb_queue_walk(&packets, skb) {
                /* 0 for no outer TOS: no leak. TODO: at some later point, we
                 * might consider using flowi->tos as outer instead.
                 */
                PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
                PACKET_CB(skb)->nonce =
                                atomic64_inc_return(&key->counter.counter) - 1;
                if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
                        goto out_invalid;
        }

        /* Detach the list from its sk_buff_head sentinel before handing it
         * off as a plain NULL-terminated skb list.
         */
        packets.prev->next = NULL;
        wg_peer_get(keypair->entry.peer);
        PACKET_CB(packets.next)->keypair = keypair;
        wg_packet_create_data(packets.next);
        return;

out_invalid:
        WRITE_ONCE(key->is_valid, false);
out_nokey:
        wg_noise_keypair_put(keypair, false);

        /* We orphan the packets if we're waiting on a handshake, so that they
         * don't block a socket's pool.
         */
        skb_queue_walk(&packets, skb)
                skb_orphan(skb);
        /* Then we put them back on the top of the queue. We're not too
         * concerned about accidentally getting things a little out of order if
         * packets are being added really fast, because this queue is for before
         * packets can even be sent and it's small anyway.
         */
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        /* If we're exiting because there's something wrong with the key, it
         * means we should initiate a new handshake.
         */
        wg_packet_send_queued_handshake_initiation(peer, false);
}