/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

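/* Lockdep enlarges each spinlock with tracking state, so presumably the
 * debug configuration uses fewer (more coarsely shared) lock slots to
 * keep the cacheline-aligned lock table small.
 */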
#ifdef CONFIG_LOCKDEP
#define CONNCOUNT_LOCK_SLOTS	8U
#else
#define CONNCOUNT_LOCK_SLOTS	256U
#endif

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5
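/* MAX_KEYLEN is in u32 words, so keys may be up to 20 bytes; enough for
 * e.g. an IPv6 address plus an extra word (an illustrative layout only,
 * callers define their own key format).
 */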
44
45 /* we will save the tuples of all connections we care about */
46 struct nf_conncount_tuple {
47         struct hlist_node               node;
48         struct nf_conntrack_tuple       tuple;
49         struct nf_conntrack_zone        zone;
50         int                             cpu;
51         u32                             jiffies32;
52 };
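/* cpu and jiffies32 record which CPU inserted the entry and when;
 * find_or_evict() below uses them to decide whether a tuple missing
 * from the conntrack table is merely not yet confirmed or is stale
 * and can be evicted.
 */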

struct nf_conncount_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	u32 key[MAX_KEYLEN];
};

static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

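/* Add 'tuple' to the list at 'head'.  Callers are expected to
 * serialize access to 'head' (nf_conncount_count() does so via the
 * per-slot lock); allocation is GFP_ATOMIC because we may run from
 * packet-processing (softirq) context.
 */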
bool nf_conncount_add(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple,
		      const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_tuple *conn;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	hlist_add_head(&conn->node, head);
	return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	__s32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find_get()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		hlist_del(&conn->node);
		kmem_cache_free(conncount_conn_cachep, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

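/* Walk the list at 'head', dropping entries whose conntrack entry has
 * gone away, and return the number still alive.  If 'tuple' is
 * non-NULL, *addit is set to false when an equal tuple (in the same
 * zone) is already on the list, i.e. the caller need not add it again.
 */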
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
				 const struct nf_conntrack_tuple *tuple,
				 const struct nf_conntrack_zone *zone,
				 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn;
	struct nf_conn *found_ct;
	struct hlist_node *n;
	unsigned int length = 0;

	*addit = tuple ? true : false;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = find_or_evict(net, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				length++;
				if (!tuple)
					continue;

				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					*addit = false;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * We do not care about connections that are already
			 * closed -> ditch this one.
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(conncount_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);

static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

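/* Descend the per-slot rbtree looking for 'key'.  On a match the work
 * is delegated to nf_conncount_lookup(); along the descent, up to
 * CONNCOUNT_GC_MAX_NODES nodes whose lists turn out to be empty are
 * collected and freed, and the walk is restarted once.
 */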
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const u32 *key, u8 keylen,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
						    zone, &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* Calling tree_nodes_free() before the new allocation
		 * permits the allocator to reuse the newly freed objects.
		 *
		 * This is a rare event; in most cases we will find an
		 * existing node to reuse (or gc_count is 0).
		 */
		goto restart;
	}

	if (!tuple)
		return 0;

	/* no match, need to insert a new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}

/* Count and return the number of conntrack entries in 'net' with a
 * particular 'key'.  If 'tuple' is not NULL, insert it into the
 * accounting data structure.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	count = count_tree(net, root, key, data->keylen, tuple, zone);

	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	return count;
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
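
/* Usage sketch (illustrative only; my_data and my_check_limit are
 * hypothetical names, not part of this module).  A caller such as a
 * connlimit-style match builds a key, typically the masked source
 * network, and compares the returned count against its limit:
 */
static struct nf_conncount_data *my_data __maybe_unused;

static bool __maybe_unused
my_check_limit(struct net *net, const u32 *key,
	       const struct nf_conntrack_tuple *tuple,
	       const struct nf_conntrack_zone *zone,
	       unsigned int limit)
{
	/* Counts live entries sharing 'key' and, because 'tuple' is
	 * non-NULL, records this connection as well.
	 */
	unsigned int count = nf_conncount_count(net, my_data, key,
						tuple, zone);

	/* A count of 0 with a non-NULL tuple can also mean allocation
	 * failure ("hotdrop"); a real caller would handle that case.
	 */
	return count <= limit;
}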

struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
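
/* Lifecycle sketch (hypothetical helper; 'my_setup' is illustrative).
 * keylen is passed in bytes and must be a non-zero multiple of
 * sizeof(u32) of at most MAX_KEYLEN words; two words could hold e.g.
 * a masked IPv4 source address plus a zone id.  The matching teardown
 * call is nf_conncount_destroy().
 */
static struct nf_conncount_data * __maybe_unused
my_setup(struct net *net)
{
	return nf_conncount_init(net, NFPROTO_IPV4, 2 * sizeof(u32));
}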

void nf_conncount_cache_free(struct hlist_head *hhead)
{
	struct nf_conncount_tuple *conn;
	struct hlist_node *n;

	hlist_for_each_entry_safe(conn, n, hhead, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->hhead);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

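	/* A given tree is always protected by the same lock (its slot
	 * index modulo CONNCOUNT_LOCK_SLOTS); requiring the slot count
	 * to be a multiple of the lock count keeps that mapping evenly
	 * distributed across the locks.
	 */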
	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");