// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
                                        const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
        /* siphash gives us a secure 64bit number based on a random key. Since
         * the bits are uniformly distributed, we can then mask off to get the
         * bits we need.
         */
        const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

        return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}

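/* Allocates the public-key table and seeds its private siphash key from the
 * kernel CSPRNG, so the mapping from public key to bucket is not predictable
 * by a remote party. Returns NULL if the allocation fails.
 */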
struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
        struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        get_random_bytes(&table->key, sizeof(table->key));
        hash_init(table->hashtable);
        mutex_init(&table->lock);
        return table;
}

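/* Writers (add/remove) are serialized by table->lock; readers walk the
 * buckets locklessly under RCU, which is why the _rcu list helpers are used
 * on both sides.
 */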
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
                             struct wg_peer *peer)
{
        mutex_lock(&table->lock);
        hlist_add_head_rcu(&peer->pubkey_hash,
                           pubkey_bucket(table, peer->handshake.remote_static));
        mutex_unlock(&table->lock);
}

void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
                                struct wg_peer *peer)
{
        mutex_lock(&table->lock);
        hlist_del_init_rcu(&peer->pubkey_hash);
        mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
                           const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
        struct wg_peer *iter_peer, *peer = NULL;

        rcu_read_lock_bh();
        hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
                                    pubkey_hash) {
                if (!memcmp(pubkey, iter_peer->handshake.remote_static,
                            NOISE_PUBLIC_KEY_LEN)) {
                        peer = iter_peer;
                        break;
                }
        }
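        /* The matching peer may be concurrently going away;
         * wg_peer_get_maybe_zero() only returns it if a reference could still
         * be taken, otherwise the lookup fails with NULL.
         */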
        peer = wg_peer_get_maybe_zero(peer);
        rcu_read_unlock_bh();
        return peer;
}

static struct hlist_head *index_bucket(struct index_hashtable *table,
                                       const __le32 index)
{
        /* Since the indices are random and thus all bits are uniformly
         * distributed, we can find its bucket simply by masking.
         */
        return &table->hashtable[(__force u32)index &
                                 (HASH_SIZE(table->hashtable) - 1)];
}

struct index_hashtable *wg_index_hashtable_alloc(void)
{
        struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        hash_init(table->hashtable);
        spin_lock_init(&table->lock);
        return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) *  (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash chain lengths,
 * which is another thing to consider moving forward.
 */

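/* Assigns a fresh random index to @entry and publishes it in the table,
 * dropping whatever index the entry may have held before. The chosen index is
 * returned in its __le32 form.
 */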
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
                                 struct index_hashtable_entry *entry)
{
        struct index_hashtable_entry *existing_entry;

        spin_lock_bh(&table->lock);
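        /* Unhash the entry from any index it is currently registered under,
         * so that it will only be reachable via the new index chosen below.
         */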
        hlist_del_init_rcu(&entry->index_hash);
        spin_unlock_bh(&table->lock);

        rcu_read_lock_bh();

search_unused_slot:
        /* First we try to find an unused slot, randomly, while unlocked. */
        entry->index = (__force __le32)get_random_u32();
        hlist_for_each_entry_rcu_bh(existing_entry,
                                    index_bucket(table, entry->index),
                                    index_hash) {
                if (existing_entry->index == entry->index)
                        /* If it's already in use, we continue searching. */
                        goto search_unused_slot;
        }

        /* Once we've found an unused slot, we lock it, and then double-check
         * that nobody else stole it from us.
         */
        spin_lock_bh(&table->lock);
        hlist_for_each_entry_rcu_bh(existing_entry,
                                    index_bucket(table, entry->index),
                                    index_hash) {
                if (existing_entry->index == entry->index) {
                        spin_unlock_bh(&table->lock);
                        /* If it was stolen, we start over. */
                        goto search_unused_slot;
                }
        }
        /* Otherwise, we know we have it exclusively (since we're locked),
         * so we insert.
         */
        hlist_add_head_rcu(&entry->index_hash,
                           index_bucket(table, entry->index));
        spin_unlock_bh(&table->lock);

        rcu_read_unlock_bh();

        return entry->index;
}

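/* Transfers @old's index to @new in place, so lookups on that index keep
 * resolving without any window in which neither entry is present. Returns
 * false if @old was never in the table to begin with.
 */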
bool wg_index_hashtable_replace(struct index_hashtable *table,
                                struct index_hashtable_entry *old,
                                struct index_hashtable_entry *new)
{
        if (unlikely(hlist_unhashed(&old->index_hash)))
                return false;
        spin_lock_bh(&table->lock);
        new->index = old->index;
        hlist_replace_rcu(&old->index_hash, &new->index_hash);

        /* Calling init here NULLs out index_hash, and in fact after this
         * function returns, it's theoretically possible for this to get
         * reinserted elsewhere. That means the RCU lookup below might either
         * terminate early or jump between buckets, in which case the packet
         * simply gets dropped, which isn't terrible.
         */
        INIT_HLIST_NODE(&old->index_hash);
        spin_unlock_bh(&table->lock);
        return true;
}

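/* Safe to call even if the entry was never inserted or has already been
 * removed; hlist_del_init_rcu() is a no-op on an unhashed node.
 */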
void wg_index_hashtable_remove(struct index_hashtable *table,
                               struct index_hashtable_entry *entry)
{
        spin_lock_bh(&table->lock);
        hlist_del_init_rcu(&entry->index_hash);
        spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
                          const enum index_hashtable_type type_mask,
                          const __le32 index, struct wg_peer **peer)
{
        struct index_hashtable_entry *iter_entry, *entry = NULL;

        rcu_read_lock_bh();
        hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
                                    index_hash) {
                if (iter_entry->index == index) {
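                        /* Only accept the entry if its type (e.g. handshake
                         * vs. keypair) is one the caller asked for via
                         * type_mask.
                         */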
                        if (likely(iter_entry->type & type_mask))
                                entry = iter_entry;
                        break;
                }
        }
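        /* An index match alone isn't enough: we also need to take a reference
         * on the owning peer, which fails if the peer is already being torn
         * down.
         */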
        if (likely(entry)) {
                entry->peer = wg_peer_get_maybe_zero(entry->peer);
                if (likely(entry->peer))
                        *peer = entry->peer;
                else
                        entry = NULL;
        }
        rcu_read_unlock_bh();
        return entry;
}