/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     last_bucket;
        bool                    exiting;
        bool                    early_drop;
        long                    next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of the table */
#define GC_MAX_BUCKETS_DIV      128u
/* upper bound of a full table scan */
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO  50u

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        /* 1) Acquire the lock */
        spin_lock(lock);

        /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
         * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
         */
        if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
                return;

        /* fast path failed, unlock */
        spin_unlock(lock);

        /* Slow path 1) get global lock */
        spin_lock(&nf_conntrack_locks_all_lock);

        /* Slow path 2) get the lock we want */
        spin_lock(lock);

        /* Slow path 3) release the global lock */
        spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
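
/* Example (illustrative sketch, not upstream code): a caller that needs
 * one hash bucket takes its per-bucket lock through nf_conntrack_lock(),
 * which serializes it against a concurrent nf_conntrack_all_lock() such
 * as the one taken while the table is resized:
 *
 *	local_bh_disable();
 *	nf_conntrack_lock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
 *	// ... walk or modify the chain at nf_conntrack_hash[hash] ...
 *	spin_unlock(&nf_conntrack_locks[hash % CONNTRACK_LOCKS]);
 *	local_bh_enable();
 */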

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);

        nf_conntrack_locks_all = true;

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_lock(&nf_conntrack_locks[i]);

                /* This spin_unlock provides the "release" to ensure that
                 * nf_conntrack_locks_all==true is visible to everyone that
                 * acquired spin_lock(&nf_conntrack_locks[]).
                 */
                spin_unlock(&nf_conntrack_locks[i]);
        }
}

static void nf_conntrack_all_unlock(void)
{
        /* All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
         * critical section.
         * It pairs with the smp_load_acquire() in nf_conntrack_lock()
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              const struct net *net)
{
        unsigned int n;
        u32 seed;

        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

static u32 scale_hash(u32 hash)
{
        return reciprocal_scale(hash, nf_conntrack_htable_size);
}

static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
                            unsigned int size)
{
        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
                          const struct nf_conntrack_tuple *tuple)
{
        return scale_hash(hash_conntrack_raw(tuple, net));
}
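
/* Example (illustrative sketch): reciprocal_scale() maps the 32-bit
 * hash onto a bucket index without a modulo operation:
 *
 *	bucket = ((u64)hash * size) >> 32;
 *
 * e.g. hash = 0x80000000 with size = 16384 gives bucket 8192, so the
 * hash space is spread evenly across the table even when the size is
 * not a power of two.
 */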

static bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l4proto *l4proto)
{
        unsigned int size;
        const __be32 *ap;
        __be32 _addrs[8];
        struct {
                __be16 sport;
                __be16 dport;
        } _inet_hdr, *inet_hdr;

        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        switch (l3num) {
        case NFPROTO_IPV4:
                nhoff += offsetof(struct iphdr, saddr);
                size = 2 * sizeof(__be32);
                break;
        case NFPROTO_IPV6:
                nhoff += offsetof(struct ipv6hdr, saddr);
                size = sizeof(_addrs);
                break;
        default:
                return true;
        }

        ap = skb_header_pointer(skb, nhoff, size, _addrs);
        if (!ap)
                return false;

        switch (l3num) {
        case NFPROTO_IPV4:
                tuple->src.u3.ip = ap[0];
                tuple->dst.u3.ip = ap[1];
                break;
        case NFPROTO_IPV6:
                memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
                memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
                break;
        }

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        if (unlikely(l4proto->pkt_to_tuple))
                return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);

        /* Actually only need first 4 bytes to get ports. */
        inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
        if (!inet_hdr)
                return false;

        tuple->src.u.udp.port = inet_hdr->sport;
        tuple->dst.u.udp.port = inet_hdr->dport;
        return true;
}
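
/* Example (illustrative sketch): the _inet_hdr handling above is the
 * usual skb_header_pointer() idiom; the on-stack buffer is only used
 * when the requested bytes are not in the linear area:
 *
 *	struct { __be16 sport, dport; } _buf, *hp;
 *
 *	hp = skb_header_pointer(skb, dataoff, sizeof(_buf), &_buf);
 *	if (!hp)
 *		return false;	// packet shorter than dataoff + 4 bytes
 *	// hp points either into the skb data or at _buf
 */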

static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            u_int8_t *protonum)
{
        int dataoff = -1;
        const struct iphdr *iph;
        struct iphdr _iph;

        iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
        if (!iph)
                return -1;

        /* Conntrack defragments packets, we might still see fragments
         * inside ICMP packets though.
         */
        if (iph->frag_off & htons(IP_OFFSET))
                return -1;

        dataoff = nhoff + (iph->ihl << 2);
        *protonum = iph->protocol;

        /* Check bogus IP headers */
        if (dataoff > skb->len) {
                pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
                         nhoff, iph->ihl << 2, skb->len);
                return -1;
        }
        return dataoff;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            u8 *protonum)
{
        int protoff = -1;
        unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
        __be16 frag_off;
        u8 nexthdr;

        if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
                          &nexthdr, sizeof(nexthdr)) != 0) {
                pr_debug("can't get nexthdr\n");
                return -1;
        }
        protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
        /*
         * (protoff == skb->len) means the packet has no data, just the
         * IPv6 header and possibly extension headers, but it is tracked
         * anyway
         */
        if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("can't find proto in pkt\n");
                return -1;
        }

        *protonum = nexthdr;
        return protoff;
}
#endif

static int get_l4proto(const struct sk_buff *skb,
                       unsigned int nhoff, u8 pf, u8 *l4num)
{
        switch (pf) {
        case NFPROTO_IPV4:
                return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6:
                return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
        default:
                *l4num = 0;
                break;
        }
        return -1;
}

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_l4proto *l4proto;
        u8 protonum;
        int protoff;
        int ret;

        rcu_read_lock();

        protoff = get_l4proto(skb, nhoff, l3num, &protonum);
        if (protoff <= 0) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
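
/* Example (illustrative sketch): a hypothetical caller extracting the
 * tuple of an IPv4 skb:
 *
 *	struct nf_conntrack_tuple tuple;
 *
 *	if (nf_ct_get_tuplepr(skb, skb_network_offset(skb), NFPROTO_IPV4,
 *			      dev_net(skb->dev), &tuple))
 *		pr_debug("sport %u dport %u\n",
 *			 ntohs(tuple.src.u.tcp.port),
 *			 ntohs(tuple.dst.u.tcp.port));
 */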

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;

        switch (orig->src.l3num) {
        case NFPROTO_IPV4:
                inverse->src.u3.ip = orig->dst.u3.ip;
                inverse->dst.u3.ip = orig->src.u3.ip;
                break;
        case NFPROTO_IPV6:
                inverse->src.u3.in6 = orig->dst.u3.in6;
                inverse->dst.u3.in6 = orig->src.u3.in6;
                break;
        default:
                break;
        }

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;

        if (unlikely(l4proto->invert_tuple))
                return l4proto->invert_tuple(inverse, orig);

        inverse->src.u.all = orig->dst.u.all;
        inverse->dst.u.all = orig->src.u.all;
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
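
/* Example (illustrative sketch): for the TCP tuple
 * 10.0.0.1:1024 -> 10.0.0.2:80, nf_ct_invert_tuple() produces
 * 10.0.0.2:80 -> 10.0.0.1:1024 with dst.dir flipped; this inverted
 * tuple is what an entry stores as its IP_CT_DIR_REPLY key.
 */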

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload the first tuple to link into the unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl, *p;

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
                tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
                if (!tmpl)
                        return NULL;

                p = tmpl;
                tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                if (tmpl != p) {
                        tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                        tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
                }
        } else {
                tmpl = kzalloc(sizeof(*tmpl), flags);
                if (!tmpl)
                        return NULL;
        }

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
        nf_ct_zone_add(tmpl, zone);
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
                kfree((char *)tmpl - tmpl->proto.tmpl_padto);
        else
                kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
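
/* Example (illustrative sketch): NFCT_INFOMASK is the set of low
 * skb->_nfct bits reserved for the ctinfo value, so a template must be
 * aligned to NFCT_INFOMASK + 1.  Assuming NFCT_INFOMASK == 7:
 *
 *	NFCT_ALIGN(0x1004) == ((0x1004 + 7) & ~7UL) == 0x1008
 *
 * and tmpl_padto records those 4 bytes of padding so that
 * nf_ct_tmpl_free() can recover the original kzalloc() pointer.
 */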

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        const struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        WARN_ON(atomic_read(&nfct->use) != 0);

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->destroy)
                l4proto->destroy(ct);

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before the connection is in the list, so we need to clean
         * here, too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
                return false;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
                /* destroy event was not delivered. nf_ct_put will
                 * be done by event cache worker on redelivery.
                 */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with an equal tuple,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct) &&
               net_eq(net, nf_ct_net(ct));
}

static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
        return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
               nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
                                 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
               nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
               nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
               net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
        if (!atomic_inc_not_zero(&ct->ct_general.use))
                return;

        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);

        nf_ct_put(ct);
}

/*
 * Warning:
 * - Caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int bucket, hsize;

begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        bucket = reciprocal_scale(hash, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
                struct nf_conn *ct;

                ct = nf_ct_tuplehash_to_ctrack(h);
                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_is_dying(ct))
                        continue;

                if (nf_ct_key_equal(h, tuple, zone, net))
                        return h;
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart the lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
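
/* Example (illustrative sketch): a hypothetical caller; on success the
 * entry is returned with its reference count bumped, so it must be
 * released with nf_ct_put() when done:
 *
 *	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		// ... inspect ct, e.g. test_bit(IPS_ASSURED_BIT, &ct->status)
 *		nf_ct_put(ct);
 *	}
 */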

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &nf_conntrack_hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
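
/* Example (illustrative sketch): unlike the packet path, which confirms
 * entries via __nf_conntrack_confirm(), a control-plane caller such as
 * ctnetlink inserts a fully initialized entry directly:
 *
 *	err = nf_conntrack_hash_check_insert(ct);
 *	if (err < 0)		// -EEXIST: an equal tuple won the race
 *		goto err_undo;	// hypothetical error label
 */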

static inline void nf_ct_acct_update(struct nf_conn *ct,
                                     enum ip_conntrack_info ctinfo,
                                     unsigned int len)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;

                atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
        }
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                             const struct nf_conn *loser_ct)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(loser_ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;
                unsigned int bytes;

                /* u32 should be fine since we must have seen one packet. */
                bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
                nf_ct_acct_update(ct, ctinfo, bytes);
        }
}

/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
                               enum ip_conntrack_info ctinfo,
                               struct nf_conntrack_tuple_hash *h)
{
        /* This is the conntrack entry already in the hashes that won the race. */
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
        const struct nf_conntrack_l4proto *l4proto;
        enum ip_conntrack_info oldinfo;
        struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
                    nf_ct_match(ct, loser_ct)) {
                        nf_ct_acct_merge(ct, ctinfo, loser_ct);
                        nf_conntrack_put(&loser_ct->ct_general);
                        nf_ct_set(skb, ct, oldinfo);
                        return NF_ACCEPT;
                }
                nf_ct_put(ct);
        }
        NF_CT_STAT_INC(net, drop);
        return NF_DROP;
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;
        int ret = NF_DROP;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual
           packet which created the connection will be IP_CT_NEW or,
           for an expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in the hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */

        /* No external references means no one else could have
         * confirmed us.
         */
        WARN_ON(nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct))) {
                nf_ct_add_to_dying_list(ct);
                goto dying;
        }

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout += nfct_time_stamp;
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
                if (skb->tstamp == 0)
                        __net_timestamp(skb);

                tstamp->start = ktime_to_ns(skb->tstamp);
        }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        unsigned int hash, hsize;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;

        zone = nf_ct_zone(ignored_conntrack);

        rcu_read_lock();
 begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        hash = __hash_conntrack(net, tuple, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);

                if (ct == ignored_conntrack)
                        continue;

                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_key_equal(h, tuple, zone, net)) {
                        NF_CT_STAT_INC_ATOMIC(net, found);
                        rcu_read_unlock();
                        return 1;
                }
        }

        if (get_nulls_value(n) != hash) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        rcu_read_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
                                    struct hlist_nulls_head *head)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int drops = 0;
        struct nf_conn *tmp;

        hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
                tmp = nf_ct_tuplehash_to_ctrack(h);

                if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
                        continue;

                if (nf_ct_is_expired(tmp)) {
                        nf_ct_gc_expired(tmp);
                        continue;
                }

                if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
                    !net_eq(nf_ct_net(tmp), net) ||
                    nf_ct_is_dying(tmp))
                        continue;

                if (!atomic_inc_not_zero(&tmp->ct_general.use))
                        continue;

                /* kill only if still in the same netns -- might have moved
                 * due to SLAB_TYPESAFE_BY_RCU rules.
                 *
                 * We steal the timer reference.  If that fails, the timer has
                 * already fired or someone else deleted it. Just drop the ref
                 * and move to the next entry.
                 */
                if (net_eq(nf_ct_net(tmp), net) &&
                    nf_ct_is_confirmed(tmp) &&
                    nf_ct_delete(tmp, 0, 0))
                        drops++;

                nf_ct_put(tmp);
        }

        return drops;
}

static noinline int early_drop(struct net *net, unsigned int _hash)
{
        unsigned int i;

        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
                unsigned int hash, hsize, drops;

                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
                hash = reciprocal_scale(_hash++, hsize);

                drops = early_drop_list(net, &ct_hash[hash]);
                rcu_read_unlock();

                if (drops) {
                        NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
                        return true;
                }
        }

        return false;
}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
        return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;

        if (!test_bit(IPS_ASSURED_BIT, &ct->status))
                return true;

        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
                return true;

        return false;
}

#define DAY     (86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire; this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static void nf_ct_offload_timeout(struct nf_conn *ct)
{
        if (nf_ct_expires(ct) < DAY / 2)
                ct->timeout = nfct_time_stamp + DAY;
}
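
/* Example (illustrative sketch): ct->timeout holds an absolute
 * jiffies32 deadline, so the remaining lifetime checked above is
 * roughly
 *
 *	remaining = max_t(s32, ct->timeout - nfct_time_stamp, 0);
 *
 * Topping offloaded entries up to a full DAY whenever less than half a
 * DAY remains keeps nf_ct_is_expired() from ever firing for them.
 */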

static void gc_worker(struct work_struct *work)
{
        unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
        unsigned int i, goal, buckets = 0, expired_count = 0;
        unsigned int nf_conntrack_max95 = 0;
        struct conntrack_gc_work *gc_work;
        unsigned int ratio, scanned = 0;
        unsigned long next_run;

        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

        goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
        i = gc_work->last_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

        do {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
                unsigned int hashsz;
                struct nf_conn *tmp;

                i++;
                rcu_read_lock();

                nf_conntrack_get_ht(&ct_hash, &hashsz);
                if (i >= hashsz)
                        i = 0;

                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct net *net;

                        tmp = nf_ct_tuplehash_to_ctrack(h);

                        scanned++;
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
                        }

                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
                                expired_count++;
                                continue;
                        }

                        if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
                                continue;

                        net = nf_ct_net(tmp);
                        if (atomic_read(&net->ct.count) < nf_conntrack_max95)
                                continue;

                        /* need to take a reference to avoid possible races */
                        if (!atomic_inc_not_zero(&tmp->ct_general.use))
                                continue;

                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
                        }

                        if (gc_worker_can_early_drop(tmp))
                                nf_ct_kill(tmp);

                        nf_ct_put(tmp);
                }

                /* could check get_nulls_value() here and restart if ct
                 * was moved to another chain.  But given that gc is
                 * best-effort, we will just continue with the next hash slot.
                 */
                rcu_read_unlock();
                cond_resched();
        } while (++buckets < goal);

        if (gc_work->exiting)
                return;

        /*
         * Eviction will normally happen from the packet path, and not
         * from this gc worker.
         *
         * This worker is only here to reap expired entries when the system
         * went idle after a busy period.
         *
         * The heuristics below are supposed to balance conflicting goals:
         *
         * 1. Minimize time until we notice a stale entry
         * 2. Maximize scan intervals to not waste cycles
         *
         * Normally, the expire ratio will be close to 0.
         *
         * As soon as a sizeable fraction of the entries has expired,
         * increase the scan frequency.
         */
        ratio = scanned ? expired_count * 100 / scanned : 0;
        if (ratio > GC_EVICT_RATIO) {
                gc_work->next_gc_run = min_interval;
        } else {
                unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

                BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

                gc_work->next_gc_run += min_interval;
                if (gc_work->next_gc_run > max)
                        gc_work->next_gc_run = max;
        }

        next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        gc_work->early_drop = false;
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
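
/* Example (illustrative sketch): with HZ == 250, the scan interval
 * floor is max(250 / 128, 1) == 1 jiffy and the ceiling is
 * 16 * 250 / 128 == 31 jiffies.  A scan that found 60 expired entries
 * out of 100 scanned (ratio 60 > GC_EVICT_RATIO) snaps next_gc_run
 * back to the floor; otherwise it creeps up by one floor step per run
 * until it reaches the ceiling.
 */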
1246
1247 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1248 {
1249         INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
1250         gc_work->next_gc_run = HZ;
1251         gc_work->exiting = false;
1252 }
1253
1254 static struct nf_conn *
1255 __nf_conntrack_alloc(struct net *net,
1256                      const struct nf_conntrack_zone *zone,
1257                      const struct nf_conntrack_tuple *orig,
1258                      const struct nf_conntrack_tuple *repl,
1259                      gfp_t gfp, u32 hash)
1260 {
1261         struct nf_conn *ct;
1262
1263         /* We don't want any race condition at early drop stage */
1264         atomic_inc(&net->ct.count);
1265
1266         if (nf_conntrack_max &&
1267             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
1268                 if (!early_drop(net, hash)) {
1269                         if (!conntrack_gc_work.early_drop)
1270                                 conntrack_gc_work.early_drop = true;
1271                         atomic_dec(&net->ct.count);
1272                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1273                         return ERR_PTR(-ENOMEM);
1274                 }
1275         }
1276
1277         /*
1278          * Do not use kmem_cache_zalloc(), as this cache uses
1279          * SLAB_TYPESAFE_BY_RCU.
1280          */
1281         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1282         if (ct == NULL)
1283                 goto out;
1284
1285         spin_lock_init(&ct->lock);
1286         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1287         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1288         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1289         /* save hash for reusing when confirming */
1290         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
1291         ct->status = 0;
1292         write_pnet(&ct->ct_net, net);
1293         memset(&ct->__nfct_init_offset[0], 0,
1294                offsetof(struct nf_conn, proto) -
1295                offsetof(struct nf_conn, __nfct_init_offset[0]));
1296
1297         nf_ct_zone_add(ct, zone);
1298
1299         /* Because we use RCU lookups, we set ct_general.use to zero before
1300          * this is inserted in any list.
1301          */
1302         atomic_set(&ct->ct_general.use, 0);
1303         return ct;
1304 out:
1305         atomic_dec(&net->ct.count);
1306         return ERR_PTR(-ENOMEM);
1307 }
1308
1309 struct nf_conn *nf_conntrack_alloc(struct net *net,
1310                                    const struct nf_conntrack_zone *zone,
1311                                    const struct nf_conntrack_tuple *orig,
1312                                    const struct nf_conntrack_tuple *repl,
1313                                    gfp_t gfp)
1314 {
1315         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1316 }
1317 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1318
1319 void nf_conntrack_free(struct nf_conn *ct)
1320 {
1321         struct net *net = nf_ct_net(ct);
1322
1323         /* A freed object has refcnt == 0, that's
1324          * the golden rule for SLAB_TYPESAFE_BY_RCU
1325          */
1326         WARN_ON(atomic_read(&ct->ct_general.use) != 0);
1327
1328         nf_ct_ext_destroy(ct);
1329         nf_ct_ext_free(ct);
1330         kmem_cache_free(nf_conntrack_cachep, ct);
1331         smp_mb__before_atomic();
1332         atomic_dec(&net->ct.count);
1333 }
1334 EXPORT_SYMBOL_GPL(nf_conntrack_free);
1335
1336
1337 /* Allocate a new conntrack: we return -ENOMEM if classification
1338    failed due to stress.  Otherwise it really is unclassifiable. */
1339 static noinline struct nf_conntrack_tuple_hash *
1340 init_conntrack(struct net *net, struct nf_conn *tmpl,
1341                const struct nf_conntrack_tuple *tuple,
1342                const struct nf_conntrack_l4proto *l4proto,
1343                struct sk_buff *skb,
1344                unsigned int dataoff, u32 hash)
1345 {
1346         struct nf_conn *ct;
1347         struct nf_conn_help *help;
1348         struct nf_conntrack_tuple repl_tuple;
1349         struct nf_conntrack_ecache *ecache;
1350         struct nf_conntrack_expect *exp = NULL;
1351         const struct nf_conntrack_zone *zone;
1352         struct nf_conn_timeout *timeout_ext;
1353         struct nf_conntrack_zone tmp;
1354
1355         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) {
1356                 pr_debug("Can't invert tuple.\n");
1357                 return NULL;
1358         }
1359
1360         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1361         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1362                                   hash);
1363         if (IS_ERR(ct))
1364                 return (struct nf_conntrack_tuple_hash *)ct;
1365
1366         if (!nf_ct_add_synproxy(ct, tmpl)) {
1367                 nf_conntrack_free(ct);
1368                 return ERR_PTR(-ENOMEM);
1369         }
1370
1371         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1372
1373         if (!l4proto->new(ct, skb, dataoff)) {
1374                 nf_conntrack_free(ct);
1375                 pr_debug("can't track with proto module\n");
1376                 return NULL;
1377         }
1378
1379         if (timeout_ext)
1380                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1381                                       GFP_ATOMIC);
1382
1383         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1384         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1385         nf_ct_labels_ext_add(ct);
1386
1387         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1388         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1389                                  ecache ? ecache->expmask : 0,
1390                              GFP_ATOMIC);
1391
1392         local_bh_disable();
1393         if (net->ct.expect_count) {
1394                 spin_lock(&nf_conntrack_expect_lock);
1395                 exp = nf_ct_find_expectation(net, zone, tuple);
1396                 if (exp) {
1397                         pr_debug("expectation arrives ct=%p exp=%p\n",
1398                                  ct, exp);
1399                         /* Welcome, Mr. Bond.  We've been expecting you... */
1400                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1401                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1402                         ct->master = exp->master;
1403                         if (exp->helper) {
1404                                 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1405                                 if (help)
1406                                         rcu_assign_pointer(help->helper, exp->helper);
1407                         }
1408
1409 #ifdef CONFIG_NF_CONNTRACK_MARK
1410                         ct->mark = exp->master->mark;
1411 #endif
1412 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1413                         ct->secmark = exp->master->secmark;
1414 #endif
1415                         NF_CT_STAT_INC(net, expect_new);
1416                 }
1417                 spin_unlock(&nf_conntrack_expect_lock);
1418         }
1419         if (!exp)
1420                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1421
1422         /* Now it is inserted into the unconfirmed list, bump refcount */
1423         nf_conntrack_get(&ct->ct_general);
1424         nf_ct_add_to_unconfirmed_list(ct);
1425
1426         local_bh_enable();
1427
1428         if (exp) {
1429                 if (exp->expectfn)
1430                         exp->expectfn(ct, exp);
1431                 nf_ct_expect_put(exp);
1432         }
1433
1434         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1435 }
1436
1437 /* On success, returns 0, sets skb->_nfct | ctinfo */
1438 static int
1439 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1440                   struct sk_buff *skb,
1441                   unsigned int dataoff,
1442                   u_int16_t l3num,
1443                   u_int8_t protonum,
1444                   const struct nf_conntrack_l4proto *l4proto)
1445 {
1446         const struct nf_conntrack_zone *zone;
1447         struct nf_conntrack_tuple tuple;
1448         struct nf_conntrack_tuple_hash *h;
1449         enum ip_conntrack_info ctinfo;
1450         struct nf_conntrack_zone tmp;
1451         struct nf_conn *ct;
1452         u32 hash;
1453
1454         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1455                              dataoff, l3num, protonum, net, &tuple, l4proto)) {
1456                 pr_debug("Can't get tuple\n");
1457                 return 0;
1458         }
1459
1460         /* look for tuple match */
1461         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1462         hash = hash_conntrack_raw(&tuple, net);
1463         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1464         if (!h) {
1465                 h = init_conntrack(net, tmpl, &tuple, l4proto,
1466                                    skb, dataoff, hash);
1467                 if (!h)
1468                         return 0;
1469                 if (IS_ERR(h))
1470                         return PTR_ERR(h);
1471         }
1472         ct = nf_ct_tuplehash_to_ctrack(h);
1473
1474         /* It exists; we have (non-exclusive) reference. */
1475         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1476                 ctinfo = IP_CT_ESTABLISHED_REPLY;
1477         } else {
1478                 /* Once we've had two way comms, always ESTABLISHED. */
1479                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1480                         pr_debug("normal packet for %p\n", ct);
1481                         ctinfo = IP_CT_ESTABLISHED;
1482                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1483                         pr_debug("related packet for %p\n", ct);
1484                         ctinfo = IP_CT_RELATED;
1485                 } else {
1486                         pr_debug("new packet for %p\n", ct);
1487                         ctinfo = IP_CT_NEW;
1488                 }
1489         }
1490         nf_ct_set(skb, ct, ctinfo);
1491         return 0;
1492 }
1493
1494 unsigned int
1495 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1496                 struct sk_buff *skb)
1497 {
1498         const struct nf_conntrack_l4proto *l4proto;
1499         struct nf_conn *ct, *tmpl;
1500         enum ip_conntrack_info ctinfo;
1501         u_int8_t protonum;
1502         int dataoff, ret;
1503
1504         tmpl = nf_ct_get(skb, &ctinfo);
1505         if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1506                 /* Previously seen (loopback or untracked)?  Ignore. */
1507                 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1508                      ctinfo == IP_CT_UNTRACKED) {
1509                         NF_CT_STAT_INC_ATOMIC(net, ignore);
1510                         return NF_ACCEPT;
1511                 }
1512                 skb->_nfct = 0;
1513         }
1514
1515         /* rcu_read_lock()ed by nf_hook_thresh */
1516         dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum);
1517         if (dataoff <= 0) {
1518                 pr_debug("not prepared to track yet or error occurred\n");
1519                 NF_CT_STAT_INC_ATOMIC(net, error);
1520                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1521                 ret = NF_ACCEPT;
1522                 goto out;
1523         }
1524
1525         l4proto = __nf_ct_l4proto_find(pf, protonum);
1526
1527         /* It may be a special packet, error, unclean...
1528          * inverse of the return code tells the netfilter
1529          * core what to do with the packet. */
1530         if (l4proto->error != NULL) {
1531                 ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
1532                 if (ret <= 0) {
1533                         NF_CT_STAT_INC_ATOMIC(net, error);
1534                         NF_CT_STAT_INC_ATOMIC(net, invalid);
1535                         ret = -ret;
1536                         goto out;
1537                 }
1538                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1539                 if (skb->_nfct)
1540                         goto out;
1541         }
1542 repeat:
1543         ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto);
1544         if (ret < 0) {
1545                 /* Too stressed to deal. */
1546                 NF_CT_STAT_INC_ATOMIC(net, drop);
1547                 ret = NF_DROP;
1548                 goto out;
1549         }
1550
1551         ct = nf_ct_get(skb, &ctinfo);
1552         if (!ct) {
1553                 /* Not a valid part of a connection */
1554                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1555                 ret = NF_ACCEPT;
1556                 goto out;
1557         }
1558
1559         ret = l4proto->packet(ct, skb, dataoff, ctinfo);
1560         if (ret <= 0) {
1561                 /* Invalid: inverse of the return code tells
1562                  * the netfilter core what to do */
1563                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1564                 nf_conntrack_put(&ct->ct_general);
1565                 skb->_nfct = 0;
1566                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1567                 if (ret == -NF_DROP)
1568                         NF_CT_STAT_INC_ATOMIC(net, drop);
1569                 /* Special case: TCP tracker reports an attempt to reopen a
1570                  * closed/aborted connection. We have to go back and create a
1571                  * fresh conntrack.
1572                  */
1573                 if (ret == -NF_REPEAT)
1574                         goto repeat;
1575                 ret = -ret;
1576                 goto out;
1577         }
1578
1579         if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1580             !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1581                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1582 out:
1583         if (tmpl)
1584                 nf_ct_put(tmpl);
1585
1586         return ret;
1587 }
1588 EXPORT_SYMBOL_GPL(nf_conntrack_in);
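
/* Example (illustrative sketch, not part of the original file): a minimal
 * netfilter hook function that feeds packets to conntrack.  The name
 * "my_conntrack_hook" is an assumption for illustration; in-tree callers
 * wire nf_conntrack_in() up through the per-family hook tables.
 *
 *	static unsigned int my_conntrack_hook(void *priv,
 *					      struct sk_buff *skb,
 *					      const struct nf_hook_state *state)
 *	{
 *		return nf_conntrack_in(state->net, state->pf,
 *				       state->hook, skb);
 *	}
 */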
1589
1590 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1591                           const struct nf_conntrack_tuple *orig)
1592 {
1593         bool ret;
1594
1595         rcu_read_lock();
1596         ret = nf_ct_invert_tuple(inverse, orig,
1597                                  __nf_ct_l4proto_find(orig->src.l3num,
1598                                                       orig->dst.protonum));
1599         rcu_read_unlock();
1600         return ret;
1601 }
1602 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
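
/* Example (illustrative sketch): obtaining the reply-direction tuple for an
 * existing entry.  "ct" stands for any valid struct nf_conn; error handling
 * beyond the failed-inversion case is elided.
 *
 *	struct nf_conntrack_tuple inverse;
 *
 *	if (!nf_ct_invert_tuplepr(&inverse,
 *				  nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)))
 *		return;		// l4proto could not invert the tuple
 */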
1603
1604 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1605    implicitly racy: see __nf_conntrack_confirm */
1606 void nf_conntrack_alter_reply(struct nf_conn *ct,
1607                               const struct nf_conntrack_tuple *newreply)
1608 {
1609         struct nf_conn_help *help = nfct_help(ct);
1610
1611         /* Should be unconfirmed, so not in the hash table yet */
1612         WARN_ON(nf_ct_is_confirmed(ct));
1613
1614         pr_debug("Altering reply tuple of %p to ", ct);
1615         nf_ct_dump_tuple(newreply);
1616
1617         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1618         if (ct->master || (help && !hlist_empty(&help->expectations)))
1619                 return;
1620
1621         rcu_read_lock();
1622         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1623         rcu_read_unlock();
1624 }
1625 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1626
1627 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1628 void __nf_ct_refresh_acct(struct nf_conn *ct,
1629                           enum ip_conntrack_info ctinfo,
1630                           const struct sk_buff *skb,
1631                           unsigned long extra_jiffies,
1632                           int do_acct)
1633 {
1634         WARN_ON(!skb);
1635
1636         /* Only update if this is not a fixed timeout */
1637         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1638                 goto acct;
1639
1640         /* If not in hash table, timer will not be active yet */
1641         if (nf_ct_is_confirmed(ct))
1642                 extra_jiffies += nfct_time_stamp;
1643
1644         ct->timeout = extra_jiffies;
1645 acct:
1646         if (do_acct)
1647                 nf_ct_acct_update(ct, ctinfo, skb->len);
1648 }
1649 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
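
/* Example (illustrative sketch): protocol trackers normally go through the
 * nf_ct_refresh_acct() inline wrapper from <net/netfilter/nf_conntrack.h>,
 * which calls __nf_ct_refresh_acct() with accounting enabled.  The 30
 * second timeout below is an arbitrary value for illustration.
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);
 */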
1650
1651 bool nf_ct_kill_acct(struct nf_conn *ct,
1652                      enum ip_conntrack_info ctinfo,
1653                      const struct sk_buff *skb)
1654 {
1655         nf_ct_acct_update(ct, ctinfo, skb->len);
1656
1657         return nf_ct_delete(ct, 0, 0);
1658 }
1659 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
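
/* Example (illustrative sketch): accounting the final packet and killing
 * the entry in one step.  The "connection_is_over" condition is a stand-in
 * for whatever the caller's protocol logic decides.
 *
 *	if (connection_is_over)
 *		nf_ct_kill_acct(ct, ctinfo, skb);
 */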
1660
1661 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1662
1663 #include <linux/netfilter/nfnetlink.h>
1664 #include <linux/netfilter/nfnetlink_conntrack.h>
1665 #include <linux/mutex.h>
1666
1667 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1668  * in ip_conntrack_core, since we don't want the protocols to autoload
1669  * or depend on ctnetlink */
1670 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1671                                const struct nf_conntrack_tuple *tuple)
1672 {
1673         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1674             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1675                 goto nla_put_failure;
1676         return 0;
1677
1678 nla_put_failure:
1679         return -1;
1680 }
1681 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1682
1683 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1684         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1685         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1686 };
1687 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1688
1689 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1690                                struct nf_conntrack_tuple *t)
1691 {
1692         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1693                 return -EINVAL;
1694
1695         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1696         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1697
1698         return 0;
1699 }
1700 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1701
1702 unsigned int nf_ct_port_nlattr_tuple_size(void)
1703 {
1704         static unsigned int size __read_mostly;
1705
1706         if (!size)
1707                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1708
1709         return size;
1710 }
1711 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
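
/* Example (illustrative sketch): a port-based protocol tracker can reuse
 * the generic helpers above in its nf_conntrack_l4proto ops, much like the
 * in-tree TCP/UDP trackers.  Only the ctnetlink-related fields are shown;
 * "my_l4proto" and the elided fields are assumptions for illustration.
 *
 *	static const struct nf_conntrack_l4proto my_l4proto = {
 *		...
 *		.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 *		.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 *		.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 *		.nla_policy		= nf_ct_port_nla_policy,
 *	};
 */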
1712 #endif
1713
1714 /* Used by ipt_REJECT and ip6t_REJECT. */
1715 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1716 {
1717         struct nf_conn *ct;
1718         enum ip_conntrack_info ctinfo;
1719
1720         /* This ICMP is in the reverse direction to the packet that caused it */
1721         ct = nf_ct_get(skb, &ctinfo);
1722         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1723                 ctinfo = IP_CT_RELATED_REPLY;
1724         else
1725                 ctinfo = IP_CT_RELATED;
1726
1727         /* Attach to new skbuff, and increment count */
1728         nf_ct_set(nskb, ct, ctinfo);
1729         nf_conntrack_get(skb_nfct(nskb));
1730 }
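
/* Example (illustrative sketch): REJECT-style code does not call the static
 * function above directly; it reaches it through the ip_ct_attach pointer
 * via nf_ct_attach() from <linux/netfilter.h>.  "nskb" is the generated
 * error packet, "oldskb" the packet that triggered it.
 *
 *	nf_ct_attach(nskb, oldskb);
 */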
1731
1732 static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1733 {
1734         const struct nf_conntrack_l4proto *l4proto;
1735         struct nf_conntrack_tuple_hash *h;
1736         struct nf_conntrack_tuple tuple;
1737         enum ip_conntrack_info ctinfo;
1738         struct nf_nat_hook *nat_hook;
1739         unsigned int status;
1740         struct nf_conn *ct;
1741         int dataoff;
1742         u16 l3num;
1743         u8 l4num;
1744
1745         ct = nf_ct_get(skb, &ctinfo);
1746         if (!ct || nf_ct_is_confirmed(ct))
1747                 return 0;
1748
1749         l3num = nf_ct_l3num(ct);
1750
1751         dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1752         if (dataoff <= 0)
1753                 return -1;
1754
1755         l4proto = nf_ct_l4proto_find_get(l3num, l4num);
1756
1757         if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
1758                              l4num, net, &tuple, l4proto))
1759                 return -1;
1760
1761         if (ct->status & IPS_SRC_NAT) {
1762                 memcpy(tuple.src.u3.all,
1763                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1764                        sizeof(tuple.src.u3.all));
1765                 tuple.src.u.all =
1766                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1767         }
1768
1769         if (ct->status & IPS_DST_NAT) {
1770                 memcpy(tuple.dst.u3.all,
1771                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1772                        sizeof(tuple.dst.u3.all));
1773                 tuple.dst.u.all =
1774                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1775         }
1776
1777         h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1778         if (!h)
1779                 return 0;
1780
1781         /* Store the status bits of the clashing conntrack so we can re-do the
1782          * NAT mangling according to what has already been done to this packet.
1783          */
1784         status = ct->status;
1785
1786         nf_ct_put(ct);
1787         ct = nf_ct_tuplehash_to_ctrack(h);
1788         nf_ct_set(skb, ct, ctinfo);
1789
1790         nat_hook = rcu_dereference(nf_nat_hook);
1791         if (!nat_hook)
1792                 return 0;
1793
1794         if (status & IPS_SRC_NAT &&
1795             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1796                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1797                 return -1;
1798
1799         if (status & IPS_DST_NAT &&
1800             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1801                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1802                 return -1;
1803
1804         return 0;
1805 }
1806
1807 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
1808                                        const struct sk_buff *skb)
1809 {
1810         const struct nf_conntrack_tuple *src_tuple;
1811         const struct nf_conntrack_tuple_hash *hash;
1812         struct nf_conntrack_tuple srctuple;
1813         enum ip_conntrack_info ctinfo;
1814         struct nf_conn *ct;
1815
1816         ct = nf_ct_get(skb, &ctinfo);
1817         if (ct) {
1818                 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
1819                 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1820                 return true;
1821         }
1822
1823         if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
1824                                NFPROTO_IPV4, dev_net(skb->dev),
1825                                &srctuple))
1826                 return false;
1827
1828         hash = nf_conntrack_find_get(dev_net(skb->dev),
1829                                      &nf_ct_zone_dflt,
1830                                      &srctuple);
1831         if (!hash)
1832                 return false;
1833
1834         ct = nf_ct_tuplehash_to_ctrack(hash);
1835         src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
1836         memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1837         nf_ct_put(ct);
1838
1839         return true;
1840 }
1841
1842 /* Bring out ya dead! */
1843 static struct nf_conn *
1844 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1845                 void *data, unsigned int *bucket)
1846 {
1847         struct nf_conntrack_tuple_hash *h;
1848         struct nf_conn *ct;
1849         struct hlist_nulls_node *n;
1850         spinlock_t *lockp;
1851
1852         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1853                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1854                 local_bh_disable();
1855                 nf_conntrack_lock(lockp);
1856                 if (*bucket < nf_conntrack_htable_size) {
1857                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1858                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1859                                         continue;
1860                                 ct = nf_ct_tuplehash_to_ctrack(h);
1861                                 if (iter(ct, data))
1862                                         goto found;
1863                         }
1864                 }
1865                 spin_unlock(lockp);
1866                 local_bh_enable();
1867                 cond_resched();
1868         }
1869
1870         return NULL;
1871 found:
1872         atomic_inc(&ct->ct_general.use);
1873         spin_unlock(lockp);
1874         local_bh_enable();
1875         return ct;
1876 }
1877
1878 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
1879                                   void *data, u32 portid, int report)
1880 {
1881         unsigned int bucket = 0, sequence;
1882         struct nf_conn *ct;
1883
1884         might_sleep();
1885
1886         for (;;) {
1887                 sequence = read_seqcount_begin(&nf_conntrack_generation);
1888
1889                 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
1890                         /* Time to push up daisies... */
1891
1892                         nf_ct_delete(ct, portid, report);
1893                         nf_ct_put(ct);
1894                         cond_resched();
1895                 }
1896
1897                 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
1898                         break;
1899                 bucket = 0;
1900         }
1901 }
1902
1903 struct iter_data {
1904         int (*iter)(struct nf_conn *i, void *data);
1905         void *data;
1906         struct net *net;
1907 };
1908
1909 static int iter_net_only(struct nf_conn *i, void *data)
1910 {
1911         struct iter_data *d = data;
1912
1913         if (!net_eq(d->net, nf_ct_net(i)))
1914                 return 0;
1915
1916         return d->iter(i, d->data);
1917 }
1918
1919 static void
1920 __nf_ct_unconfirmed_destroy(struct net *net)
1921 {
1922         int cpu;
1923
1924         for_each_possible_cpu(cpu) {
1925                 struct nf_conntrack_tuple_hash *h;
1926                 struct hlist_nulls_node *n;
1927                 struct ct_pcpu *pcpu;
1928
1929                 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1930
1931                 spin_lock_bh(&pcpu->lock);
1932                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1933                         struct nf_conn *ct;
1934
1935                         ct = nf_ct_tuplehash_to_ctrack(h);
1936
1937                         /* we cannot call iter() on the unconfirmed list; the
1938                          * owning cpu can reallocate ct->ext at any time.
1939                          */
1940                         set_bit(IPS_DYING_BIT, &ct->status);
1941                 }
1942                 spin_unlock_bh(&pcpu->lock);
1943                 cond_resched();
1944         }
1945 }
1946
1947 void nf_ct_unconfirmed_destroy(struct net *net)
1948 {
1949         might_sleep();
1950
1951         if (atomic_read(&net->ct.count) > 0) {
1952                 __nf_ct_unconfirmed_destroy(net);
1953                 nf_queue_nf_hook_drop(net);
1954                 synchronize_net();
1955         }
1956 }
1957 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
1958
1959 void nf_ct_iterate_cleanup_net(struct net *net,
1960                                int (*iter)(struct nf_conn *i, void *data),
1961                                void *data, u32 portid, int report)
1962 {
1963         struct iter_data d;
1964
1965         might_sleep();
1966
1967         if (atomic_read(&net->ct.count) == 0)
1968                 return;
1969
1970         d.iter = iter;
1971         d.data = data;
1972         d.net = net;
1973
1974         nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
1975 }
1976 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
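
/* Example (illustrative sketch): flushing every entry of one namespace.
 * The iterator name is an assumption for illustration; a nonzero return
 * value tells the walker to delete the entry.
 *
 *	static int flush_all(struct nf_conn *ct, void *data)
 *	{
 *		return 1;	// delete every entry in this netns
 *	}
 *	...
 *	nf_ct_iterate_cleanup_net(net, flush_all, NULL, 0, 0);
 */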
1977
1978 /**
1979  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
1980  * @iter: callback to invoke for each conntrack
1981  * @data: data to pass to @iter
1982  *
1983  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
1984  * unconfirmed list as dying (so they will not be inserted into
1985  * main table).
1986  *
1987  * Can only be called in module exit path.
1988  */
1989 void
1990 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
1991 {
1992         struct net *net;
1993
1994         down_read(&net_rwsem);
1995         for_each_net(net) {
1996                 if (atomic_read(&net->ct.count) == 0)
1997                         continue;
1998                 __nf_ct_unconfirmed_destroy(net);
1999                 nf_queue_nf_hook_drop(net);
2000         }
2001         up_read(&net_rwsem);
2002
2003         /* Need to wait for the netns cleanup worker to finish, if it's
2004          * running -- it might have deleted a net namespace from
2005          * the global list, so our __nf_ct_unconfirmed_destroy() might
2006          * not have affected all namespaces.
2007          */
2008         net_ns_barrier();
2009
2010         /* a conntrack could have been unlinked from the unconfirmed list
2011          * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2012          * This makes sure it is inserted into the conntrack table.
2013          */
2014         synchronize_net();
2015
2016         nf_ct_iterate_cleanup(iter, data, 0, 0);
2017 }
2018 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
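
/* Example (illustrative sketch): a module exit path dropping all entries
 * that still reference its helper.  "my_helper" and the iterator are
 * assumptions for illustration, not taken from in-tree code.
 *
 *	static int kill_using_my_helper(struct nf_conn *ct, void *data)
 *	{
 *		const struct nf_conn_help *help = nfct_help(ct);
 *
 *		return help && rcu_access_pointer(help->helper) == data;
 *	}
 *	...
 *	nf_ct_iterate_destroy(kill_using_my_helper, &my_helper);
 */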
2019
2020 static int kill_all(struct nf_conn *i, void *data)
2021 {
2022         return net_eq(nf_ct_net(i), data);
2023 }
2024
2025 void nf_conntrack_cleanup_start(void)
2026 {
2027         conntrack_gc_work.exiting = true;
2028         RCU_INIT_POINTER(ip_ct_attach, NULL);
2029 }
2030
2031 void nf_conntrack_cleanup_end(void)
2032 {
2033         RCU_INIT_POINTER(nf_ct_hook, NULL);
2034         cancel_delayed_work_sync(&conntrack_gc_work.dwork);
2035         kvfree(nf_conntrack_hash);
2036
2037         nf_conntrack_proto_fini();
2038         nf_conntrack_seqadj_fini();
2039         nf_conntrack_labels_fini();
2040         nf_conntrack_helper_fini();
2041         nf_conntrack_timeout_fini();
2042         nf_conntrack_ecache_fini();
2043         nf_conntrack_tstamp_fini();
2044         nf_conntrack_acct_fini();
2045         nf_conntrack_expect_fini();
2046
2047         kmem_cache_destroy(nf_conntrack_cachep);
2048 }
2049
2050 /*
2051  * Mishearing the voices in his head, our hero wonders how he's
2052  * supposed to kill the mall.
2053  */
2054 void nf_conntrack_cleanup_net(struct net *net)
2055 {
2056         LIST_HEAD(single);
2057
2058         list_add(&net->exit_list, &single);
2059         nf_conntrack_cleanup_net_list(&single);
2060 }
2061
2062 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2063 {
2064         int busy;
2065         struct net *net;
2066
2067         /*
2068          * This makes sure all current packets have passed through
2069          * the netfilter framework.  Roll on, two-stage module
2070          * delete...
2071          */
2072         synchronize_net();
2073 i_see_dead_people:
2074         busy = 0;
2075         list_for_each_entry(net, net_exit_list, exit_list) {
2076                 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
2077                 if (atomic_read(&net->ct.count) != 0)
2078                         busy = 1;
2079         }
2080         if (busy) {
2081                 schedule();
2082                 goto i_see_dead_people;
2083         }
2084
2085         list_for_each_entry(net, net_exit_list, exit_list) {
2086                 nf_conntrack_proto_pernet_fini(net);
2087                 nf_conntrack_helper_pernet_fini(net);
2088                 nf_conntrack_ecache_pernet_fini(net);
2089                 nf_conntrack_tstamp_pernet_fini(net);
2090                 nf_conntrack_acct_pernet_fini(net);
2091                 nf_conntrack_expect_pernet_fini(net);
2092                 free_percpu(net->ct.stat);
2093                 free_percpu(net->ct.pcpu_lists);
2094         }
2095 }
2096
2097 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
2098 {
2099         struct hlist_nulls_head *hash;
2100         unsigned int nr_slots, i;
2101
2102         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2103                 return NULL;
2104
2105         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2106         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
2107
2108         hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2109                               GFP_KERNEL | __GFP_ZERO);
2110
2111         if (hash && nulls)
2112                 for (i = 0; i < nr_slots; i++)
2113                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
2114
2115         return hash;
2116 }
2117 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
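
/* Example (illustrative sketch): allocating a separate nulls-terminated
 * hash table.  Passing nulls != 0 stores the bucket index in each chain's
 * terminating nulls marker.  The size is rounded up to a whole number of
 * pages, so re-read it after the call; release with kvfree().
 *
 *	unsigned int size = 1024;
 *	struct hlist_nulls_head *hash;
 *
 *	hash = nf_ct_alloc_hashtable(&size, 1);
 *	if (!hash)
 *		return -ENOMEM;
 *	// "size" now holds the rounded-up slot count
 */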
2118
2119 int nf_conntrack_hash_resize(unsigned int hashsize)
2120 {
2121         int i, bucket;
2122         unsigned int old_size;
2123         struct hlist_nulls_head *hash, *old_hash;
2124         struct nf_conntrack_tuple_hash *h;
2125         struct nf_conn *ct;
2126
2127         if (!hashsize)
2128                 return -EINVAL;
2129
2130         hash = nf_ct_alloc_hashtable(&hashsize, 1);
2131         if (!hash)
2132                 return -ENOMEM;
2133
2134         old_size = nf_conntrack_htable_size;
2135         if (old_size == hashsize) {
2136                 kvfree(hash);
2137                 return 0;
2138         }
2139
2140         local_bh_disable();
2141         nf_conntrack_all_lock();
2142         write_seqcount_begin(&nf_conntrack_generation);
2143
2144         /* Lookups in the old hash might happen in parallel, which means we
2145          * might get false negatives during connection lookup. New connections
2146          * created because of a false negative won't make it into the hash
2147          * though, since insertion requires taking the locks.
2148          */
2149
2150         for (i = 0; i < nf_conntrack_htable_size; i++) {
2151                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2152                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2153                                               struct nf_conntrack_tuple_hash, hnnode);
2154                         ct = nf_ct_tuplehash_to_ctrack(h);
2155                         hlist_nulls_del_rcu(&h->hnnode);
2156                         bucket = __hash_conntrack(nf_ct_net(ct),
2157                                                   &h->tuple, hashsize);
2158                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
2159                 }
2160         }
2161         old_size = nf_conntrack_htable_size;
2162         old_hash = nf_conntrack_hash;
2163
2164         nf_conntrack_hash = hash;
2165         nf_conntrack_htable_size = hashsize;
2166
2167         write_seqcount_end(&nf_conntrack_generation);
2168         nf_conntrack_all_unlock();
2169         local_bh_enable();
2170
2171         synchronize_net();
2172         kvfree(old_hash);
2173         return 0;
2174 }
2175
2176 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2177 {
2178         unsigned int hashsize;
2179         int rc;
2180
2181         if (current->nsproxy->net_ns != &init_net)
2182                 return -EOPNOTSUPP;
2183
2184         /* On boot, we can set this without any fancy locking. */
2185         if (!nf_conntrack_hash)
2186                 return param_set_uint(val, kp);
2187
2188         rc = kstrtouint(val, 0, &hashsize);
2189         if (rc)
2190                 return rc;
2191
2192         return nf_conntrack_hash_resize(hashsize);
2193 }
2194 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
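
/* Example (illustrative note): this is the handler behind the "hashsize"
 * module parameter, registered via module_param_call() elsewhere in the
 * conntrack module, so the table can be resized at runtime from the
 * initial netns, e.g. by writing a new bucket count to
 * /sys/module/nf_conntrack/parameters/hashsize.
 */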
2195
2196 static __always_inline unsigned int total_extension_size(void)
2197 {
2198         /* remember to add new extensions below */
2199         BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2200
2201         return sizeof(struct nf_ct_ext) +
2202                sizeof(struct nf_conn_help)
2203 #if IS_ENABLED(CONFIG_NF_NAT)
2204                 + sizeof(struct nf_conn_nat)
2205 #endif
2206                 + sizeof(struct nf_conn_seqadj)
2207                 + sizeof(struct nf_conn_acct)
2208 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2209                 + sizeof(struct nf_conntrack_ecache)
2210 #endif
2211 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2212                 + sizeof(struct nf_conn_tstamp)
2213 #endif
2214 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2215                 + sizeof(struct nf_conn_timeout)
2216 #endif
2217 #ifdef CONFIG_NF_CONNTRACK_LABELS
2218                 + sizeof(struct nf_conn_labels)
2219 #endif
2220 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2221                 + sizeof(struct nf_conn_synproxy)
2222 #endif
2223         ;
2224 }
2225
2226 int nf_conntrack_init_start(void)
2227 {
2228         int max_factor = 8;
2229         int ret = -ENOMEM;
2230         int i;
2231
2232         /* struct nf_ct_ext uses u8 to store offsets/size */
2233         BUILD_BUG_ON(total_extension_size() > 255u);
2234
2235         seqcount_init(&nf_conntrack_generation);
2236
2237         for (i = 0; i < CONNTRACK_LOCKS; i++)
2238                 spin_lock_init(&nf_conntrack_locks[i]);
2239
2240         if (!nf_conntrack_htable_size) {
2241                 /* Idea from tcp.c: use 1/16384 of memory.
2242                  * On i386: 32MB machine has 512 buckets.
2243                  * >= 1GB machines have 16384 buckets.
2244                  * >= 4GB machines have 65536 buckets.
2245                  */
2246                 nf_conntrack_htable_size
2247                         = (((totalram_pages << PAGE_SHIFT) / 16384)
2248                            / sizeof(struct hlist_head));
2249                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2250                         nf_conntrack_htable_size = 65536;
2251                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2252                         nf_conntrack_htable_size = 16384;
2253                 if (nf_conntrack_htable_size < 32)
2254                         nf_conntrack_htable_size = 32;
2255
2256                 /* Use a maximum factor of four by default to get the same
2257                  * maximum as with the old struct list_heads. When a table size
2258                  * is given we use the old value of 8 to avoid reducing the
2259                  * maximum number of entries. */
2260                 max_factor = 4;
2261         }
2262
2263         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2264         if (!nf_conntrack_hash)
2265                 return -ENOMEM;
2266
2267         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2268
2269         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2270                                                 sizeof(struct nf_conn),
2271                                                 NFCT_INFOMASK + 1,
2272                                                 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2273         if (!nf_conntrack_cachep)
2274                 goto err_cachep;
2275
2276         ret = nf_conntrack_expect_init();
2277         if (ret < 0)
2278                 goto err_expect;
2279
2280         ret = nf_conntrack_acct_init();
2281         if (ret < 0)
2282                 goto err_acct;
2283
2284         ret = nf_conntrack_tstamp_init();
2285         if (ret < 0)
2286                 goto err_tstamp;
2287
2288         ret = nf_conntrack_ecache_init();
2289         if (ret < 0)
2290                 goto err_ecache;
2291
2292         ret = nf_conntrack_timeout_init();
2293         if (ret < 0)
2294                 goto err_timeout;
2295
2296         ret = nf_conntrack_helper_init();
2297         if (ret < 0)
2298                 goto err_helper;
2299
2300         ret = nf_conntrack_labels_init();
2301         if (ret < 0)
2302                 goto err_labels;
2303
2304         ret = nf_conntrack_seqadj_init();
2305         if (ret < 0)
2306                 goto err_seqadj;
2307
2308         ret = nf_conntrack_proto_init();
2309         if (ret < 0)
2310                 goto err_proto;
2311
2312         conntrack_gc_work_init(&conntrack_gc_work);
2313         queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2314
2315         return 0;
2316
2317 err_proto:
2318         nf_conntrack_seqadj_fini();
2319 err_seqadj:
2320         nf_conntrack_labels_fini();
2321 err_labels:
2322         nf_conntrack_helper_fini();
2323 err_helper:
2324         nf_conntrack_timeout_fini();
2325 err_timeout:
2326         nf_conntrack_ecache_fini();
2327 err_ecache:
2328         nf_conntrack_tstamp_fini();
2329 err_tstamp:
2330         nf_conntrack_acct_fini();
2331 err_acct:
2332         nf_conntrack_expect_fini();
2333 err_expect:
2334         kmem_cache_destroy(nf_conntrack_cachep);
2335 err_cachep:
2336         kvfree(nf_conntrack_hash);
2337         return ret;
2338 }
2339
2340 static struct nf_ct_hook nf_conntrack_hook = {
2341         .update         = nf_conntrack_update,
2342         .destroy        = destroy_conntrack,
2343         .get_tuple_skb  = nf_conntrack_get_tuple_skb,
2344 };
2345
2346 void nf_conntrack_init_end(void)
2347 {
2348         /* For use by REJECT target */
2349         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2350         RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
2351 }
2352
2353 /*
2354  * We need to use special "null" values that are not used in the hash table
2355  */
2356 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
2357 #define DYING_NULLS_VAL         ((1<<30)+1)
2358 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
2359
2360 int nf_conntrack_init_net(struct net *net)
2361 {
2362         int ret = -ENOMEM;
2363         int cpu;
2364
2365         BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2366         atomic_set(&net->ct.count, 0);
2367
2368         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2369         if (!net->ct.pcpu_lists)
2370                 goto err_stat;
2371
2372         for_each_possible_cpu(cpu) {
2373                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2374
2375                 spin_lock_init(&pcpu->lock);
2376                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2377                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2378         }
2379
2380         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2381         if (!net->ct.stat)
2382                 goto err_pcpu_lists;
2383
2384         ret = nf_conntrack_expect_pernet_init(net);
2385         if (ret < 0)
2386                 goto err_expect;
2387         ret = nf_conntrack_acct_pernet_init(net);
2388         if (ret < 0)
2389                 goto err_acct;
2390         ret = nf_conntrack_tstamp_pernet_init(net);
2391         if (ret < 0)
2392                 goto err_tstamp;
2393         ret = nf_conntrack_ecache_pernet_init(net);
2394         if (ret < 0)
2395                 goto err_ecache;
2396         ret = nf_conntrack_helper_pernet_init(net);
2397         if (ret < 0)
2398                 goto err_helper;
2399         ret = nf_conntrack_proto_pernet_init(net);
2400         if (ret < 0)
2401                 goto err_proto;
2402         return 0;
2403
2404 err_proto:
2405         nf_conntrack_helper_pernet_fini(net);
2406 err_helper:
2407         nf_conntrack_ecache_pernet_fini(net);
2408 err_ecache:
2409         nf_conntrack_tstamp_pernet_fini(net);
2410 err_tstamp:
2411         nf_conntrack_acct_pernet_fini(net);
2412 err_acct:
2413         nf_conntrack_expect_pernet_fini(net);
2414 err_expect:
2415         free_percpu(net->ct.stat);
2416 err_pcpu_lists:
2417         free_percpu(net->ct.pcpu_lists);
2418 err_stat:
2419         return ret;
2420 }