1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/siphash.h>
29 #include <linux/err.h>
30 #include <linux/percpu.h>
31 #include <linux/moduleparam.h>
32 #include <linux/notifier.h>
33 #include <linux/kernel.h>
34 #include <linux/netdevice.h>
35 #include <linux/socket.h>
36 #include <linux/mm.h>
37 #include <linux/nsproxy.h>
38 #include <linux/rculist_nulls.h>
39
40 #include <net/netfilter/nf_conntrack.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_expect.h>
43 #include <net/netfilter/nf_conntrack_helper.h>
44 #include <net/netfilter/nf_conntrack_seqadj.h>
45 #include <net/netfilter/nf_conntrack_core.h>
46 #include <net/netfilter/nf_conntrack_extend.h>
47 #include <net/netfilter/nf_conntrack_acct.h>
48 #include <net/netfilter/nf_conntrack_ecache.h>
49 #include <net/netfilter/nf_conntrack_zones.h>
50 #include <net/netfilter/nf_conntrack_timestamp.h>
51 #include <net/netfilter/nf_conntrack_timeout.h>
52 #include <net/netfilter/nf_conntrack_labels.h>
53 #include <net/netfilter/nf_conntrack_synproxy.h>
54 #include <net/netfilter/nf_nat.h>
55 #include <net/netfilter/nf_nat_helper.h>
56 #include <net/netns/hash.h>
57 #include <net/ip.h>
58
59 #include "nf_internals.h"
60
61 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
62 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
63
64 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
65 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
66
67 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
68 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
69
70 struct conntrack_gc_work {
71         struct delayed_work     dwork;
72         u32                     last_bucket;
73         bool                    exiting;
74         bool                    early_drop;
75         long                    next_gc_run;
76 };
77
78 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
80 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
81 static __read_mostly bool nf_conntrack_locks_all;
82
83 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
84 #define GC_MAX_BUCKETS_DIV      128u
85 /* upper bound of full table scan */
86 #define GC_MAX_SCAN_JIFFIES     (16u * HZ)
87 /* desired ratio of entries found to be expired */
88 #define GC_EVICT_RATIO  50u
89
90 static struct conntrack_gc_work conntrack_gc_work;
91
92 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
93 {
94         /* 1) Acquire the lock */
95         spin_lock(lock);
96
97         /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
98          * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
99          */
100         if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
101                 return;
102
103         /* fast path failed, unlock */
104         spin_unlock(lock);
105
106         /* Slow path 1) get global lock */
107         spin_lock(&nf_conntrack_locks_all_lock);
108
109         /* Slow path 2) get the lock we want */
110         spin_lock(lock);
111
112         /* Slow path 3) release the global lock */
113         spin_unlock(&nf_conntrack_locks_all_lock);
114 }
115 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
116
117 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
118 {
119         h1 %= CONNTRACK_LOCKS;
120         h2 %= CONNTRACK_LOCKS;
121         spin_unlock(&nf_conntrack_locks[h1]);
122         if (h1 != h2)
123                 spin_unlock(&nf_conntrack_locks[h2]);
124 }
125
126 /* return true if we need to recompute hashes (in case hash table was resized) */
127 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
128                                      unsigned int h2, unsigned int sequence)
129 {
130         h1 %= CONNTRACK_LOCKS;
131         h2 %= CONNTRACK_LOCKS;
132         if (h1 <= h2) {
133                 nf_conntrack_lock(&nf_conntrack_locks[h1]);
134                 if (h1 != h2)
135                         spin_lock_nested(&nf_conntrack_locks[h2],
136                                          SINGLE_DEPTH_NESTING);
137         } else {
138                 nf_conntrack_lock(&nf_conntrack_locks[h2]);
139                 spin_lock_nested(&nf_conntrack_locks[h1],
140                                  SINGLE_DEPTH_NESTING);
141         }
142         if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
143                 nf_conntrack_double_unlock(h1, h2);
144                 return true;
145         }
146         return false;
147 }
148
149 static void nf_conntrack_all_lock(void)
150 {
151         int i;
152
153         spin_lock(&nf_conntrack_locks_all_lock);
154
155         nf_conntrack_locks_all = true;
156
157         for (i = 0; i < CONNTRACK_LOCKS; i++) {
158                 spin_lock(&nf_conntrack_locks[i]);
159
160                 /* This spin_unlock provides the "release" to ensure that
161                  * nf_conntrack_locks_all==true is visible to everyone that
162                  * acquired spin_lock(&nf_conntrack_locks[]).
163                  */
164                 spin_unlock(&nf_conntrack_locks[i]);
165         }
166 }
167
168 static void nf_conntrack_all_unlock(void)
169 {
170         /* All prior stores must be complete before we clear
171          * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
172          * might observe the false value but not the entire
173          * critical section.
174          * It pairs with the smp_load_acquire() in nf_conntrack_lock()
175          */
176         smp_store_release(&nf_conntrack_locks_all, false);
177         spin_unlock(&nf_conntrack_locks_all_lock);
178 }
179
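/* Editor's sketch, not part of the original file: how the two-level locking
 * above is meant to be used.  A packet-path writer takes only one per-bucket
 * lock through nf_conntrack_lock(); a global operation (e.g. a table resize)
 * brackets its work with nf_conntrack_all_lock()/nf_conntrack_all_unlock(),
 * which excludes every per-bucket holder:
 *
 *	unsigned int b = hash % CONNTRACK_LOCKS;
 *
 *	nf_conntrack_lock(&nf_conntrack_locks[b]);
 *	// add/remove entries on the chains protected by bucket lock b
 *	spin_unlock(&nf_conntrack_locks[b]);
 *
 *	nf_conntrack_all_lock();
 *	// no packet-path writer can run in here
 *	nf_conntrack_all_unlock();
 */
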
180 unsigned int nf_conntrack_htable_size __read_mostly;
181 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
182
183 unsigned int nf_conntrack_max __read_mostly;
184 EXPORT_SYMBOL_GPL(nf_conntrack_max);
185 seqcount_t nf_conntrack_generation __read_mostly;
186 static unsigned int nf_conntrack_hash_rnd __read_mostly;
187
188 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
189                               const struct net *net)
190 {
191         unsigned int n;
192         u32 seed;
193
194         get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
195
196         /* The direction must be ignored, so we hash everything up to the
197          * destination ports (which is a multiple of 4) and treat the last
198          * three bytes manually.
199          */
200         seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
201         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
202         return jhash2((u32 *)tuple, n, seed ^
203                       (((__force __u16)tuple->dst.u.all << 16) |
204                       tuple->dst.protonum));
205 }
206
207 static u32 scale_hash(u32 hash)
208 {
209         return reciprocal_scale(hash, nf_conntrack_htable_size);
210 }
211
212 static u32 __hash_conntrack(const struct net *net,
213                             const struct nf_conntrack_tuple *tuple,
214                             unsigned int size)
215 {
216         return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
217 }
218
219 static u32 hash_conntrack(const struct net *net,
220                           const struct nf_conntrack_tuple *tuple)
221 {
222         return scale_hash(hash_conntrack_raw(tuple, net));
223 }
224
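/* Editor's note, illustrative only: the lookup key is computed in two stages.
 * hash_conntrack_raw() runs jhash2() over the direction-independent words of
 * the tuple (source triple plus destination address; nine u32 words with the
 * layout in nf_conntrack_tuple.h) and folds the remaining three bytes, the
 * destination port and protocol number, into the seed; the dst.dir byte is
 * deliberately left out so the same tuple hashes identically in either
 * direction of use.  The bucket is then derived without a modulo:
 *
 *	u32 raw    = hash_conntrack_raw(tuple, net);
 *	u32 bucket = reciprocal_scale(raw, nf_conntrack_htable_size);
 */
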
225 static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
226                                   unsigned int dataoff,
227                                   struct nf_conntrack_tuple *tuple)
228 {       struct {
229                 __be16 sport;
230                 __be16 dport;
231         } _inet_hdr, *inet_hdr;
232
233         /* Actually only need first 4 bytes to get ports. */
234         inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
235         if (!inet_hdr)
236                 return false;
237
238         tuple->src.u.udp.port = inet_hdr->sport;
239         tuple->dst.u.udp.port = inet_hdr->dport;
240         return true;
241 }
242
243 static bool
244 nf_ct_get_tuple(const struct sk_buff *skb,
245                 unsigned int nhoff,
246                 unsigned int dataoff,
247                 u_int16_t l3num,
248                 u_int8_t protonum,
249                 struct net *net,
250                 struct nf_conntrack_tuple *tuple)
251 {
252         unsigned int size;
253         const __be32 *ap;
254         __be32 _addrs[8];
255
256         memset(tuple, 0, sizeof(*tuple));
257
258         tuple->src.l3num = l3num;
259         switch (l3num) {
260         case NFPROTO_IPV4:
261                 nhoff += offsetof(struct iphdr, saddr);
262                 size = 2 * sizeof(__be32);
263                 break;
264         case NFPROTO_IPV6:
265                 nhoff += offsetof(struct ipv6hdr, saddr);
266                 size = sizeof(_addrs);
267                 break;
268         default:
269                 return true;
270         }
271
272         ap = skb_header_pointer(skb, nhoff, size, _addrs);
273         if (!ap)
274                 return false;
275
276         switch (l3num) {
277         case NFPROTO_IPV4:
278                 tuple->src.u3.ip = ap[0];
279                 tuple->dst.u3.ip = ap[1];
280                 break;
281         case NFPROTO_IPV6:
282                 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
283                 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
284                 break;
285         }
286
287         tuple->dst.protonum = protonum;
288         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
289
290         switch (protonum) {
291 #if IS_ENABLED(CONFIG_IPV6)
292         case IPPROTO_ICMPV6:
293                 return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
294 #endif
295         case IPPROTO_ICMP:
296                 return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
297 #ifdef CONFIG_NF_CT_PROTO_GRE
298         case IPPROTO_GRE:
299                 return gre_pkt_to_tuple(skb, dataoff, net, tuple);
300 #endif
301         case IPPROTO_TCP:
302         case IPPROTO_UDP: /* fallthrough */
303                 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
304 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
305         case IPPROTO_UDPLITE:
306                 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
307 #endif
308 #ifdef CONFIG_NF_CT_PROTO_SCTP
309         case IPPROTO_SCTP:
310                 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
311 #endif
312 #ifdef CONFIG_NF_CT_PROTO_DCCP
313         case IPPROTO_DCCP:
314                 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
315 #endif
316         default:
317                 break;
318         }
319
320         return true;
321 }
322
323 static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
324                             u_int8_t *protonum)
325 {
326         int dataoff = -1;
327         const struct iphdr *iph;
328         struct iphdr _iph;
329
330         iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
331         if (!iph)
332                 return -1;
333
334         /* Conntrack defragments packets; we might still see fragments
335          * inside ICMP packets, though.
336          */
337         if (iph->frag_off & htons(IP_OFFSET))
338                 return -1;
339
340         dataoff = nhoff + (iph->ihl << 2);
341         *protonum = iph->protocol;
342
343         /* Check bogus IP headers */
344         if (dataoff > skb->len) {
345                 pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
346                          nhoff, iph->ihl << 2, skb->len);
347                 return -1;
348         }
349         return dataoff;
350 }
351
352 #if IS_ENABLED(CONFIG_IPV6)
353 static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
354                             u8 *protonum)
355 {
356         int protoff = -1;
357         unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
358         __be16 frag_off;
359         u8 nexthdr;
360
361         if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
362                           &nexthdr, sizeof(nexthdr)) != 0) {
363                 pr_debug("can't get nexthdr\n");
364                 return -1;
365         }
366         protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
367         /*
368          * (protoff == skb->len) means the packet has no data, just the
369          * IPv6 header and possibly extension headers; it is tracked anyway.
370          */
371         if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
372                 pr_debug("can't find proto in pkt\n");
373                 return -1;
374         }
375
376         *protonum = nexthdr;
377         return protoff;
378 }
379 #endif
380
381 static int get_l4proto(const struct sk_buff *skb,
382                        unsigned int nhoff, u8 pf, u8 *l4num)
383 {
384         switch (pf) {
385         case NFPROTO_IPV4:
386                 return ipv4_get_l4proto(skb, nhoff, l4num);
387 #if IS_ENABLED(CONFIG_IPV6)
388         case NFPROTO_IPV6:
389                 return ipv6_get_l4proto(skb, nhoff, l4num);
390 #endif
391         default:
392                 *l4num = 0;
393                 break;
394         }
395         return -1;
396 }
397
398 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
399                        u_int16_t l3num,
400                        struct net *net, struct nf_conntrack_tuple *tuple)
401 {
402         u8 protonum;
403         int protoff;
404
405         protoff = get_l4proto(skb, nhoff, l3num, &protonum);
406         if (protoff <= 0)
407                 return false;
408
409         return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
410 }
411 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
412
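/* Editor's sketch, not part of the original file: typical use of the helper
 * exported above.  Given an skb whose network header starts at
 * skb_network_offset(skb), build the ORIGINAL-direction tuple:
 *
 *	struct nf_conntrack_tuple tuple;
 *
 *	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), NFPROTO_IPV4,
 *			       dev_net(skb->dev), &tuple))
 *		return;	// truncated header or untrackable protocol
 *	// tuple.src.u3.ip, tuple.dst.u.tcp.port etc. are now valid
 */
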
413 bool
414 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
415                    const struct nf_conntrack_tuple *orig)
416 {
417         memset(inverse, 0, sizeof(*inverse));
418
419         inverse->src.l3num = orig->src.l3num;
420
421         switch (orig->src.l3num) {
422         case NFPROTO_IPV4:
423                 inverse->src.u3.ip = orig->dst.u3.ip;
424                 inverse->dst.u3.ip = orig->src.u3.ip;
425                 break;
426         case NFPROTO_IPV6:
427                 inverse->src.u3.in6 = orig->dst.u3.in6;
428                 inverse->dst.u3.in6 = orig->src.u3.in6;
429                 break;
430         default:
431                 break;
432         }
433
434         inverse->dst.dir = !orig->dst.dir;
435
436         inverse->dst.protonum = orig->dst.protonum;
437
438         switch (orig->dst.protonum) {
439         case IPPROTO_ICMP:
440                 return nf_conntrack_invert_icmp_tuple(inverse, orig);
441 #if IS_ENABLED(CONFIG_IPV6)
442         case IPPROTO_ICMPV6:
443                 return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
444 #endif
445         }
446
447         inverse->src.u.all = orig->dst.u.all;
448         inverse->dst.u.all = orig->src.u.all;
449         return true;
450 }
451 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
452
453 /* Generate an almost-unique pseudo-id for a given conntrack.
454  *
455  * This intentionally doesn't re-use any of the seeds used for hash
456  * table location; we assume the id gets exposed to userspace.
457  *
458  * The following nf_conn items do not change throughout the lifetime
459  * of the nf_conn after it has been committed to the main hash table:
460  *
461  * 1. nf_conn address
462  * 2. nf_conn->ext address
463  * 3. nf_conn->master address (normally NULL)
464  * 4. tuple
465  * 5. the associated net namespace
466  */
467 u32 nf_ct_get_id(const struct nf_conn *ct)
468 {
469         static __read_mostly siphash_key_t ct_id_seed;
470         unsigned long a, b, c, d;
471
472         net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
473
474         a = (unsigned long)ct;
475         b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
476         c = (unsigned long)ct->ext;
477         d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
478                                    &ct_id_seed);
479 #ifdef CONFIG_64BIT
480         return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
481 #else
482         return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
483 #endif
484 }
485 EXPORT_SYMBOL_GPL(nf_ct_get_id);
486
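/* Editor's note, illustrative only: because every input above is stable for
 * the lifetime of a confirmed entry, the id can be recomputed on demand and
 * compared with a value previously handed to userspace (ctnetlink exposes it
 * as the conntrack id), e.g.:
 *
 *	u32 id = nf_ct_get_id(ct);
 *
 *	if (id != id_from_userspace)	// id_from_userspace is hypothetical
 *		return -ENOENT;		// not the entry the caller meant
 */
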
487 static void
488 clean_from_lists(struct nf_conn *ct)
489 {
490         pr_debug("clean_from_lists(%p)\n", ct);
491         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
492         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
493
494         /* Destroy all pending expectations */
495         nf_ct_remove_expectations(ct);
496 }
497
498 /* must be called with local_bh_disable */
499 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
500 {
501         struct ct_pcpu *pcpu;
502
503         /* add this conntrack to the (per cpu) dying list */
504         ct->cpu = smp_processor_id();
505         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
506
507         spin_lock(&pcpu->lock);
508         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
509                              &pcpu->dying);
510         spin_unlock(&pcpu->lock);
511 }
512
513 /* must be called with local_bh_disable */
514 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
515 {
516         struct ct_pcpu *pcpu;
517
518         /* add this conntrack to the (per cpu) unconfirmed list */
519         ct->cpu = smp_processor_id();
520         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
521
522         spin_lock(&pcpu->lock);
523         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
524                              &pcpu->unconfirmed);
525         spin_unlock(&pcpu->lock);
526 }
527
528 /* must be called with local_bh_disable */
529 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
530 {
531         struct ct_pcpu *pcpu;
532
533         /* We overload first tuple to link into unconfirmed or dying list.*/
534         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
535
536         spin_lock(&pcpu->lock);
537         BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
538         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
539         spin_unlock(&pcpu->lock);
540 }
541
542 #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
543
544 /* Released via destroy_conntrack() */
545 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
546                                  const struct nf_conntrack_zone *zone,
547                                  gfp_t flags)
548 {
549         struct nf_conn *tmpl, *p;
550
551         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
552                 tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
553                 if (!tmpl)
554                         return NULL;
555
556                 p = tmpl;
557                 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
558                 if (tmpl != p) {
559                         tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
560                         tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
561                 }
562         } else {
563                 tmpl = kzalloc(sizeof(*tmpl), flags);
564                 if (!tmpl)
565                         return NULL;
566         }
567
568         tmpl->status = IPS_TEMPLATE;
569         write_pnet(&tmpl->ct_net, net);
570         nf_ct_zone_add(tmpl, zone);
571         atomic_set(&tmpl->ct_general.use, 0);
572
573         return tmpl;
574 }
575 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
576
577 void nf_ct_tmpl_free(struct nf_conn *tmpl)
578 {
579         nf_ct_ext_destroy(tmpl);
580         nf_ct_ext_free(tmpl);
581
582         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
583                 kfree((char *)tmpl - tmpl->proto.tmpl_padto);
584         else
585                 kfree(tmpl);
586 }
587 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
588
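/* Editor's note, illustrative only, assuming NFCT_INFOMASK is 7 as defined in
 * <linux/netfilter/nf_conntrack_common.h>: skb->_nfct stores the ctinfo in the
 * low bits of the conntrack pointer, so a template must sit on an 8-byte
 * boundary.  If kzalloc() happened to return an address ending in ...4, then
 *
 *	NFCT_ALIGN(0x...1234) == 0x...1238, so tmpl_padto == 4
 *
 * and nf_ct_tmpl_free() undoes the adjustment with
 * kfree((char *)tmpl - tmpl->proto.tmpl_padto).
 */
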
589 static void destroy_gre_conntrack(struct nf_conn *ct)
590 {
591 #ifdef CONFIG_NF_CT_PROTO_GRE
592         struct nf_conn *master = ct->master;
593
594         if (master)
595                 nf_ct_gre_keymap_destroy(master);
596 #endif
597 }
598
599 static void
600 destroy_conntrack(struct nf_conntrack *nfct)
601 {
602         struct nf_conn *ct = (struct nf_conn *)nfct;
603
604         pr_debug("destroy_conntrack(%p)\n", ct);
605         WARN_ON(atomic_read(&nfct->use) != 0);
606
607         if (unlikely(nf_ct_is_template(ct))) {
608                 nf_ct_tmpl_free(ct);
609                 return;
610         }
611
612         if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
613                 destroy_gre_conntrack(ct);
614
615         local_bh_disable();
616         /* Expectations will have been removed in clean_from_lists,
617          * except TFTP can create an expectation on the first packet,
618          * before connection is in the list, so we need to clean here,
619          * too.
620          */
621         nf_ct_remove_expectations(ct);
622
623         nf_ct_del_from_dying_or_unconfirmed_list(ct);
624
625         local_bh_enable();
626
627         if (ct->master)
628                 nf_ct_put(ct->master);
629
630         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
631         nf_conntrack_free(ct);
632 }
633
634 static void nf_ct_delete_from_lists(struct nf_conn *ct)
635 {
636         struct net *net = nf_ct_net(ct);
637         unsigned int hash, reply_hash;
638         unsigned int sequence;
639
640         nf_ct_helper_destroy(ct);
641
642         local_bh_disable();
643         do {
644                 sequence = read_seqcount_begin(&nf_conntrack_generation);
645                 hash = hash_conntrack(net,
646                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
647                 reply_hash = hash_conntrack(net,
648                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
649         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
650
651         clean_from_lists(ct);
652         nf_conntrack_double_unlock(hash, reply_hash);
653
654         nf_ct_add_to_dying_list(ct);
655
656         local_bh_enable();
657 }
658
659 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
660 {
661         struct nf_conn_tstamp *tstamp;
662
663         if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
664                 return false;
665
666         tstamp = nf_conn_tstamp_find(ct);
667         if (tstamp && tstamp->stop == 0)
668                 tstamp->stop = ktime_get_real_ns();
669
670         if (nf_conntrack_event_report(IPCT_DESTROY, ct,
671                                     portid, report) < 0) {
672                 /* destroy event was not delivered. nf_ct_put will
673                  * be done by event cache worker on redelivery.
674                  */
675                 nf_ct_delete_from_lists(ct);
676                 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
677                 return false;
678         }
679
680         nf_conntrack_ecache_work(nf_ct_net(ct));
681         nf_ct_delete_from_lists(ct);
682         nf_ct_put(ct);
683         return true;
684 }
685 EXPORT_SYMBOL_GPL(nf_ct_delete);
686
687 static inline bool
688 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
689                 const struct nf_conntrack_tuple *tuple,
690                 const struct nf_conntrack_zone *zone,
691                 const struct net *net)
692 {
693         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
694
695         /* A conntrack can be recreated with an equal tuple,
696          * so we need to check that the conntrack is confirmed.
697          */
698         return nf_ct_tuple_equal(tuple, &h->tuple) &&
699                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
700                nf_ct_is_confirmed(ct) &&
701                net_eq(net, nf_ct_net(ct));
702 }
703
704 static inline bool
705 nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
706 {
707         return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
708                                  &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
709                nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
710                                  &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
711                nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
712                nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
713                net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
714 }
715
716 /* caller must hold rcu readlock and none of the nf_conntrack_locks */
717 static void nf_ct_gc_expired(struct nf_conn *ct)
718 {
719         if (!atomic_inc_not_zero(&ct->ct_general.use))
720                 return;
721
722         if (nf_ct_should_gc(ct))
723                 nf_ct_kill(ct);
724
725         nf_ct_put(ct);
726 }
727
728 /*
729  * Warning :
730  * - Caller must take a reference on returned object
731  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
732  */
733 static struct nf_conntrack_tuple_hash *
734 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
735                       const struct nf_conntrack_tuple *tuple, u32 hash)
736 {
737         struct nf_conntrack_tuple_hash *h;
738         struct hlist_nulls_head *ct_hash;
739         struct hlist_nulls_node *n;
740         unsigned int bucket, hsize;
741
742 begin:
743         nf_conntrack_get_ht(&ct_hash, &hsize);
744         bucket = reciprocal_scale(hash, hsize);
745
746         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
747                 struct nf_conn *ct;
748
749                 ct = nf_ct_tuplehash_to_ctrack(h);
750                 if (nf_ct_is_expired(ct)) {
751                         nf_ct_gc_expired(ct);
752                         continue;
753                 }
754
755                 if (nf_ct_is_dying(ct))
756                         continue;
757
758                 if (nf_ct_key_equal(h, tuple, zone, net))
759                         return h;
760         }
761         /*
762          * if the nulls value we got at the end of this lookup is
763          * not the expected one, we must restart lookup.
764          * We probably met an item that was moved to another chain.
765          */
766         if (get_nulls_value(n) != bucket) {
767                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
768                 goto begin;
769         }
770
771         return NULL;
772 }
773
774 /* Find a connection corresponding to a tuple. */
775 static struct nf_conntrack_tuple_hash *
776 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
777                         const struct nf_conntrack_tuple *tuple, u32 hash)
778 {
779         struct nf_conntrack_tuple_hash *h;
780         struct nf_conn *ct;
781
782         rcu_read_lock();
783 begin:
784         h = ____nf_conntrack_find(net, zone, tuple, hash);
785         if (h) {
786                 ct = nf_ct_tuplehash_to_ctrack(h);
787                 if (unlikely(nf_ct_is_dying(ct) ||
788                              !atomic_inc_not_zero(&ct->ct_general.use)))
789                         h = NULL;
790                 else {
791                         if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
792                                 nf_ct_put(ct);
793                                 goto begin;
794                         }
795                 }
796         }
797         rcu_read_unlock();
798
799         return h;
800 }
801
802 struct nf_conntrack_tuple_hash *
803 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
804                       const struct nf_conntrack_tuple *tuple)
805 {
806         return __nf_conntrack_find_get(net, zone, tuple,
807                                        hash_conntrack_raw(tuple, net));
808 }
809 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
810
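/* Editor's sketch, not part of the original file: a caller of the lookup
 * exported above.  nf_conntrack_find_get() already took a reference and
 * re-checked the key (required because the cache is SLAB_TYPESAFE_BY_RCU),
 * so the caller only has to drop the reference when done:
 *
 *	struct nf_conntrack_tuple_hash *h;
 *
 *	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		// ... use ct; a reference is held ...
 *		nf_ct_put(ct);
 *	}
 */
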
811 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
812                                        unsigned int hash,
813                                        unsigned int reply_hash)
814 {
815         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
816                            &nf_conntrack_hash[hash]);
817         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
818                            &nf_conntrack_hash[reply_hash]);
819 }
820
821 int
822 nf_conntrack_hash_check_insert(struct nf_conn *ct)
823 {
824         const struct nf_conntrack_zone *zone;
825         struct net *net = nf_ct_net(ct);
826         unsigned int hash, reply_hash;
827         struct nf_conntrack_tuple_hash *h;
828         struct hlist_nulls_node *n;
829         unsigned int sequence;
830
831         zone = nf_ct_zone(ct);
832
833         local_bh_disable();
834         do {
835                 sequence = read_seqcount_begin(&nf_conntrack_generation);
836                 hash = hash_conntrack(net,
837                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
838                 reply_hash = hash_conntrack(net,
839                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
840         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
841
842         /* See if there's one in the list already, including reverse */
843         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
844                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
845                                     zone, net))
846                         goto out;
847
848         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
849                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
850                                     zone, net))
851                         goto out;
852
853         smp_wmb();
854         /* The caller holds a reference to this object */
855         atomic_set(&ct->ct_general.use, 2);
856         __nf_conntrack_hash_insert(ct, hash, reply_hash);
857         nf_conntrack_double_unlock(hash, reply_hash);
858         NF_CT_STAT_INC(net, insert);
859         local_bh_enable();
860         return 0;
861
862 out:
863         nf_conntrack_double_unlock(hash, reply_hash);
864         NF_CT_STAT_INC(net, insert_failed);
865         local_bh_enable();
866         return -EEXIST;
867 }
868 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
869
870 static inline void nf_ct_acct_update(struct nf_conn *ct,
871                                      enum ip_conntrack_info ctinfo,
872                                      unsigned int len)
873 {
874         struct nf_conn_acct *acct;
875
876         acct = nf_conn_acct_find(ct);
877         if (acct) {
878                 struct nf_conn_counter *counter = acct->counter;
879
880                 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
881                 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
882         }
883 }
884
885 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
886                              const struct nf_conn *loser_ct)
887 {
888         struct nf_conn_acct *acct;
889
890         acct = nf_conn_acct_find(loser_ct);
891         if (acct) {
892                 struct nf_conn_counter *counter = acct->counter;
893                 unsigned int bytes;
894
895                 /* u32 should be fine since we must have seen one packet. */
896                 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
897                 nf_ct_acct_update(ct, ctinfo, bytes);
898         }
899 }
900
901 /* Resolve race on insertion if this protocol allows this. */
902 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
903                                enum ip_conntrack_info ctinfo,
904                                struct nf_conntrack_tuple_hash *h)
905 {
906         /* This is the conntrack entry already in hashes that won race. */
907         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
908         const struct nf_conntrack_l4proto *l4proto;
909         enum ip_conntrack_info oldinfo;
910         struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
911
912         l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
913         if (l4proto->allow_clash &&
914             !nf_ct_is_dying(ct) &&
915             atomic_inc_not_zero(&ct->ct_general.use)) {
916                 if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
917                     nf_ct_match(ct, loser_ct)) {
918                         nf_ct_acct_merge(ct, ctinfo, loser_ct);
919                         nf_conntrack_put(&loser_ct->ct_general);
920                         nf_ct_set(skb, ct, oldinfo);
921                         return NF_ACCEPT;
922                 }
923                 nf_ct_put(ct);
924         }
925         NF_CT_STAT_INC(net, drop);
926         return NF_DROP;
927 }
928
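/* Editor's summary, not part of the original file: the clash policy above,
 * written out as the condition it implements.  The entry that won the race
 * is kept (and this skb is accounted to it) only when
 *
 *	l4proto->allow_clash &&				// e.g. UDP
 *	!nf_ct_is_dying(ct) &&
 *	atomic_inc_not_zero(&ct->ct_general.use) &&
 *	(!(ct->status & IPS_NAT_DONE_MASK) || nf_ct_match(ct, loser_ct))
 *
 * otherwise the packet is dropped and counted under the "drop" statistic.
 */
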
929 /* Confirm a connection given skb; places it in hash table */
930 int
931 __nf_conntrack_confirm(struct sk_buff *skb)
932 {
933         const struct nf_conntrack_zone *zone;
934         unsigned int hash, reply_hash;
935         struct nf_conntrack_tuple_hash *h;
936         struct nf_conn *ct;
937         struct nf_conn_help *help;
938         struct nf_conn_tstamp *tstamp;
939         struct hlist_nulls_node *n;
940         enum ip_conntrack_info ctinfo;
941         struct net *net;
942         unsigned int sequence;
943         int ret = NF_DROP;
944
945         ct = nf_ct_get(skb, &ctinfo);
946         net = nf_ct_net(ct);
947
948         /* ipt_REJECT uses nf_conntrack_attach to attach related
949            ICMP/TCP RST packets in the other direction.  The actual packet
950            which created the connection will be IP_CT_NEW or, for an
951            expected connection, IP_CT_RELATED. */
952         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
953                 return NF_ACCEPT;
954
955         zone = nf_ct_zone(ct);
956         local_bh_disable();
957
958         do {
959                 sequence = read_seqcount_begin(&nf_conntrack_generation);
960                 /* reuse the hash saved before */
961                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
962                 hash = scale_hash(hash);
963                 reply_hash = hash_conntrack(net,
964                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
965
966         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
967
968         /* We're not in hash table, and we refuse to set up related
969          * connections for unconfirmed conns.  But packet copies and
970          * REJECT will give spurious warnings here.
971          */
972
973         /* Another skb with the same unconfirmed conntrack may
974          * win the race. This may happen with bridge (br_flood) or
975          * broadcast/multicast packets, which are skb_clone()d while the
976          * conntrack is still unconfirmed.
977          */
978         if (unlikely(nf_ct_is_confirmed(ct))) {
979                 WARN_ON_ONCE(1);
980                 nf_conntrack_double_unlock(hash, reply_hash);
981                 local_bh_enable();
982                 return NF_DROP;
983         }
984
985         pr_debug("Confirming conntrack %p\n", ct);
986         /* We have to check the DYING flag after unlink to prevent
987          * a race against nf_ct_get_next_corpse() possibly called from
988          * user context, else we insert an already 'dead' hash, blocking
989          * further use of that particular connection -JM.
990          */
991         nf_ct_del_from_dying_or_unconfirmed_list(ct);
992
993         if (unlikely(nf_ct_is_dying(ct))) {
994                 nf_ct_add_to_dying_list(ct);
995                 goto dying;
996         }
997
998         /* See if there's one in the list already, including reverse:
999            NAT could have grabbed it without realizing, since we're
1000            not in the hash.  If there is, we lost the race. */
1001         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
1002                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1003                                     zone, net))
1004                         goto out;
1005
1006         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
1007                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
1008                                     zone, net))
1009                         goto out;
1010
1011         /* Timer relative to confirmation time, not original
1012            setting time, otherwise we'd get timer wrap in
1013            weird delay cases. */
1014         ct->timeout += nfct_time_stamp;
1015         atomic_inc(&ct->ct_general.use);
1016         ct->status |= IPS_CONFIRMED;
1017
1018         /* set conntrack timestamp, if enabled. */
1019         tstamp = nf_conn_tstamp_find(ct);
1020         if (tstamp)
1021                 tstamp->start = ktime_get_real_ns();
1022
1023         /* Since the lookup is lockless, hash insertion must be done after
1024          * starting the timer and setting the CONFIRMED bit. The RCU barriers
1025          * guarantee that no other CPU can find the conntrack before the above
1026          * stores are visible.
1027          */
1028         __nf_conntrack_hash_insert(ct, hash, reply_hash);
1029         nf_conntrack_double_unlock(hash, reply_hash);
1030         local_bh_enable();
1031
1032         help = nfct_help(ct);
1033         if (help && help->helper)
1034                 nf_conntrack_event_cache(IPCT_HELPER, ct);
1035
1036         nf_conntrack_event_cache(master_ct(ct) ?
1037                                  IPCT_RELATED : IPCT_NEW, ct);
1038         return NF_ACCEPT;
1039
1040 out:
1041         nf_ct_add_to_dying_list(ct);
1042         ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
1043 dying:
1044         nf_conntrack_double_unlock(hash, reply_hash);
1045         NF_CT_STAT_INC(net, insert_failed);
1046         local_bh_enable();
1047         return ret;
1048 }
1049 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
1050
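/* Editor's note, illustrative only: the ordering inside __nf_conntrack_confirm()
 * matters because lookups are lockless.  The required sequence is
 *
 *	ct->timeout += nfct_time_stamp;		// make the deadline absolute
 *	ct->status  |= IPS_CONFIRMED;
 *	__nf_conntrack_hash_insert(ct, hash, reply_hash);	// publish last
 *
 * If the entry were linked into the hash first, a concurrent reader could
 * either ignore it (nf_ct_key_equal() insists on IPS_CONFIRMED) or see a
 * still-relative timeout that looks long expired and reap it through
 * nf_ct_gc_expired().
 */
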
1051 /* Returns true if a connection corresponds to the tuple (required
1052    for NAT). */
1053 int
1054 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1055                          const struct nf_conn *ignored_conntrack)
1056 {
1057         struct net *net = nf_ct_net(ignored_conntrack);
1058         const struct nf_conntrack_zone *zone;
1059         struct nf_conntrack_tuple_hash *h;
1060         struct hlist_nulls_head *ct_hash;
1061         unsigned int hash, hsize;
1062         struct hlist_nulls_node *n;
1063         struct nf_conn *ct;
1064
1065         zone = nf_ct_zone(ignored_conntrack);
1066
1067         rcu_read_lock();
1068  begin:
1069         nf_conntrack_get_ht(&ct_hash, &hsize);
1070         hash = __hash_conntrack(net, tuple, hsize);
1071
1072         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
1073                 ct = nf_ct_tuplehash_to_ctrack(h);
1074
1075                 if (ct == ignored_conntrack)
1076                         continue;
1077
1078                 if (nf_ct_is_expired(ct)) {
1079                         nf_ct_gc_expired(ct);
1080                         continue;
1081                 }
1082
1083                 if (nf_ct_key_equal(h, tuple, zone, net)) {
1084                         /* Tuple is taken already, so caller will need to find
1085                          * a new source port to use.
1086                          *
1087                          * Only exception:
1088                          * If the *original tuples* are identical, then both
1089                          * conntracks refer to the same flow.
1090                          * This is a rare situation, it can occur e.g. when
1091                          * more than one UDP packet is sent from same socket
1092                          * in different threads.
1093                          *
1094                          * Let nf_ct_resolve_clash() deal with this later.
1095                          */
1096                         if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1097                                               &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1098                                 continue;
1099
1100                         NF_CT_STAT_INC_ATOMIC(net, found);
1101                         rcu_read_unlock();
1102                         return 1;
1103                 }
1104         }
1105
1106         if (get_nulls_value(n) != hash) {
1107                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
1108                 goto begin;
1109         }
1110
1111         rcu_read_unlock();
1112
1113         return 0;
1114 }
1115 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
1116
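/* Editor's sketch, not part of the original file (variable names are
 * illustrative): the typical caller is NAT port allocation, which probes
 * candidate source ports until it finds a tuple nobody else uses:
 *
 *	for (i = 0; i < range; i++) {
 *		tuple.src.u.udp.port = htons(min + (start + i) % range);
 *		if (!nf_conntrack_tuple_taken(&tuple, ct))
 *			break;	// this mapping is free
 *	}
 */
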
1117 #define NF_CT_EVICTION_RANGE    8
1118
1119 /* There's a small race here where we may free a just-assured
1120    connection.  Too bad: we're in trouble anyway. */
1121 static unsigned int early_drop_list(struct net *net,
1122                                     struct hlist_nulls_head *head)
1123 {
1124         struct nf_conntrack_tuple_hash *h;
1125         struct hlist_nulls_node *n;
1126         unsigned int drops = 0;
1127         struct nf_conn *tmp;
1128
1129         hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
1130                 tmp = nf_ct_tuplehash_to_ctrack(h);
1131
1132                 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
1133                         continue;
1134
1135                 if (nf_ct_is_expired(tmp)) {
1136                         nf_ct_gc_expired(tmp);
1137                         continue;
1138                 }
1139
1140                 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
1141                     !net_eq(nf_ct_net(tmp), net) ||
1142                     nf_ct_is_dying(tmp))
1143                         continue;
1144
1145                 if (!atomic_inc_not_zero(&tmp->ct_general.use))
1146                         continue;
1147
1148                 /* kill only if still in same netns -- might have moved due to
1149                  * SLAB_TYPESAFE_BY_RCU rules.
1150                  *
1151                  * We steal the timer reference.  If that fails timer has
1152                  * already fired or someone else deleted it. Just drop ref
1153                  * and move to next entry.
1154                  */
1155                 if (net_eq(nf_ct_net(tmp), net) &&
1156                     nf_ct_is_confirmed(tmp) &&
1157                     nf_ct_delete(tmp, 0, 0))
1158                         drops++;
1159
1160                 nf_ct_put(tmp);
1161         }
1162
1163         return drops;
1164 }
1165
1166 static noinline int early_drop(struct net *net, unsigned int hash)
1167 {
1168         unsigned int i, bucket;
1169
1170         for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
1171                 struct hlist_nulls_head *ct_hash;
1172                 unsigned int hsize, drops;
1173
1174                 rcu_read_lock();
1175                 nf_conntrack_get_ht(&ct_hash, &hsize);
1176                 if (!i)
1177                         bucket = reciprocal_scale(hash, hsize);
1178                 else
1179                         bucket = (bucket + 1) % hsize;
1180
1181                 drops = early_drop_list(net, &ct_hash[bucket]);
1182                 rcu_read_unlock();
1183
1184                 if (drops) {
1185                         NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
1186                         return true;
1187                 }
1188         }
1189
1190         return false;
1191 }
1192
1193 static bool gc_worker_skip_ct(const struct nf_conn *ct)
1194 {
1195         return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
1196 }
1197
1198 static bool gc_worker_can_early_drop(const struct nf_conn *ct)
1199 {
1200         const struct nf_conntrack_l4proto *l4proto;
1201
1202         if (!test_bit(IPS_ASSURED_BIT, &ct->status))
1203                 return true;
1204
1205         l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
1206         if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
1207                 return true;
1208
1209         return false;
1210 }
1211
1212 #define DAY     (86400 * HZ)
1213
1214 /* Set an arbitrary timeout large enough not to ever expire; this saves
1215  * us a check for the IPS_OFFLOAD_BIT from the packet path via
1216  * nf_ct_is_expired().
1217  */
1218 static void nf_ct_offload_timeout(struct nf_conn *ct)
1219 {
1220         if (nf_ct_expires(ct) < DAY / 2)
1221                 ct->timeout = nfct_time_stamp + DAY;
1222 }
1223
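/* Editor's note, illustrative only, assuming HZ == 1000: DAY is 86400 * HZ
 * jiffies, so while IPS_OFFLOAD_BIT is set each gc pass effectively does
 *
 *	if (nf_ct_expires(ct) < 43200000)	// less than half a day left
 *		ct->timeout = nfct_time_stamp + 86400000;
 *
 * and an offloaded flow can never look "expired" to the packet path.
 */
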
1224 static void gc_worker(struct work_struct *work)
1225 {
1226         unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
1227         unsigned int i, goal, buckets = 0, expired_count = 0;
1228         unsigned int nf_conntrack_max95 = 0;
1229         struct conntrack_gc_work *gc_work;
1230         unsigned int ratio, scanned = 0;
1231         unsigned long next_run;
1232
1233         gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
1234
1235         goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
1236         i = gc_work->last_bucket;
1237         if (gc_work->early_drop)
1238                 nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
1239
1240         do {
1241                 struct nf_conntrack_tuple_hash *h;
1242                 struct hlist_nulls_head *ct_hash;
1243                 struct hlist_nulls_node *n;
1244                 unsigned int hashsz;
1245                 struct nf_conn *tmp;
1246
1247                 i++;
1248                 rcu_read_lock();
1249
1250                 nf_conntrack_get_ht(&ct_hash, &hashsz);
1251                 if (i >= hashsz)
1252                         i = 0;
1253
1254                 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
1255                         struct net *net;
1256
1257                         tmp = nf_ct_tuplehash_to_ctrack(h);
1258
1259                         scanned++;
1260                         if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
1261                                 nf_ct_offload_timeout(tmp);
1262                                 continue;
1263                         }
1264
1265                         if (nf_ct_is_expired(tmp)) {
1266                                 nf_ct_gc_expired(tmp);
1267                                 expired_count++;
1268                                 continue;
1269                         }
1270
1271                         if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
1272                                 continue;
1273
1274                         net = nf_ct_net(tmp);
1275                         if (atomic_read(&net->ct.count) < nf_conntrack_max95)
1276                                 continue;
1277
1278                         /* need to take reference to avoid possible races */
1279                         if (!atomic_inc_not_zero(&tmp->ct_general.use))
1280                                 continue;
1281
1282                         if (gc_worker_skip_ct(tmp)) {
1283                                 nf_ct_put(tmp);
1284                                 continue;
1285                         }
1286
1287                         if (gc_worker_can_early_drop(tmp))
1288                                 nf_ct_kill(tmp);
1289
1290                         nf_ct_put(tmp);
1291                 }
1292
1293                 /* could check get_nulls_value() here and restart if ct
1294                  * was moved to another chain.  But given gc is best-effort
1295                  * we will just continue with next hash slot.
1296                  */
1297                 rcu_read_unlock();
1298                 cond_resched();
1299         } while (++buckets < goal);
1300
1301         if (gc_work->exiting)
1302                 return;
1303
1304         /*
1305          * Eviction will normally happen from the packet path, and not
1306          * from this gc worker.
1307          *
1308          * This worker is only here to reap expired entries when the system
1309          * went idle after a busy period.
1310          *
1311          * The heuristics below are supposed to balance conflicting goals:
1312          *
1313          * 1. Minimize time until we notice a stale entry
1314          * 2. Maximize scan intervals to not waste cycles
1315          *
1316          * Normally, the expire ratio will be close to 0.
1317          *
1318          * As soon as a sizeable fraction of the entries have expired,
1319          * increase the scan frequency.
1320          */
1321         ratio = scanned ? expired_count * 100 / scanned : 0;
1322         if (ratio > GC_EVICT_RATIO) {
1323                 gc_work->next_gc_run = min_interval;
1324         } else {
1325                 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1326
1327                 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1328
1329                 gc_work->next_gc_run += min_interval;
1330                 if (gc_work->next_gc_run > max)
1331                         gc_work->next_gc_run = max;
1332         }
1333
1334         next_run = gc_work->next_gc_run;
1335         gc_work->last_bucket = i;
1336         gc_work->early_drop = false;
1337         queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
1338 }
1339
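/* Editor's worked example, not part of the original file, assuming HZ == 1000:
 * min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1) = 7 jiffies and the back-off
 * cap is GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV = 16000 / 128 = 125 jiffies.
 *
 *	scanned = 2000, expired_count = 1200:
 *		ratio = 1200 * 100 / 2000 = 60 > GC_EVICT_RATIO (50)
 *		=> next_gc_run = 7		(rescan almost immediately)
 *
 *	scanned = 2000, expired_count = 10:
 *		ratio = 10 * 100 / 2000 = 0 <= 50
 *		=> next_gc_run += 7, clamped to 125
 */
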
1340 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1341 {
1342         INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
1343         gc_work->next_gc_run = HZ;
1344         gc_work->exiting = false;
1345 }
1346
1347 static struct nf_conn *
1348 __nf_conntrack_alloc(struct net *net,
1349                      const struct nf_conntrack_zone *zone,
1350                      const struct nf_conntrack_tuple *orig,
1351                      const struct nf_conntrack_tuple *repl,
1352                      gfp_t gfp, u32 hash)
1353 {
1354         struct nf_conn *ct;
1355
1356         /* We don't want any race condition at early drop stage */
1357         atomic_inc(&net->ct.count);
1358
1359         if (nf_conntrack_max &&
1360             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
1361                 if (!early_drop(net, hash)) {
1362                         if (!conntrack_gc_work.early_drop)
1363                                 conntrack_gc_work.early_drop = true;
1364                         atomic_dec(&net->ct.count);
1365                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1366                         return ERR_PTR(-ENOMEM);
1367                 }
1368         }
1369
1370         /*
1371          * Do not use kmem_cache_zalloc(), as this cache uses
1372          * SLAB_TYPESAFE_BY_RCU.
1373          */
1374         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1375         if (ct == NULL)
1376                 goto out;
1377
1378         spin_lock_init(&ct->lock);
1379         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1380         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1381         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1382         /* save hash for reusing when confirming */
1383         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
1384         ct->status = 0;
1385         ct->timeout = 0;
1386         write_pnet(&ct->ct_net, net);
1387         memset(&ct->__nfct_init_offset[0], 0,
1388                offsetof(struct nf_conn, proto) -
1389                offsetof(struct nf_conn, __nfct_init_offset[0]));
1390
1391         nf_ct_zone_add(ct, zone);
1392
1393         /* Because we use RCU lookups, we set ct_general.use to zero before
1394          * this is inserted in any list.
1395          */
1396         atomic_set(&ct->ct_general.use, 0);
1397         return ct;
1398 out:
1399         atomic_dec(&net->ct.count);
1400         return ERR_PTR(-ENOMEM);
1401 }
1402
1403 struct nf_conn *nf_conntrack_alloc(struct net *net,
1404                                    const struct nf_conntrack_zone *zone,
1405                                    const struct nf_conntrack_tuple *orig,
1406                                    const struct nf_conntrack_tuple *repl,
1407                                    gfp_t gfp)
1408 {
1409         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1410 }
1411 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1412
1413 void nf_conntrack_free(struct nf_conn *ct)
1414 {
1415         struct net *net = nf_ct_net(ct);
1416
1417         /* A freed object has refcnt == 0, that's
1418          * the golden rule for SLAB_TYPESAFE_BY_RCU
1419          */
1420         WARN_ON(atomic_read(&ct->ct_general.use) != 0);
1421
1422         nf_ct_ext_destroy(ct);
1423         nf_ct_ext_free(ct);
1424         kmem_cache_free(nf_conntrack_cachep, ct);
1425         smp_mb__before_atomic();
1426         atomic_dec(&net->ct.count);
1427 }
1428 EXPORT_SYMBOL_GPL(nf_conntrack_free);
1429
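/* Editor's summary, illustrative only: the reference-count convention that
 * goes with the SLAB_TYPESAFE_BY_RCU rule above is
 *
 *	__nf_conntrack_alloc()				use == 0  (invisible to lookups)
 *	init_conntrack() / unconfirmed list		use == 1
 *	confirm or nf_conntrack_hash_check_insert()	use == 2
 *	nf_conntrack_free()				use must be 0 again
 *
 * which is why every lockless lookup takes atomic_inc_not_zero() and then
 * re-checks nf_ct_key_equal() instead of trusting a pointer read under RCU.
 */
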
1430
1431 /* Allocate a new conntrack: we return -ENOMEM if classification
1432    failed due to stress.  Otherwise it really is unclassifiable. */
1433 static noinline struct nf_conntrack_tuple_hash *
1434 init_conntrack(struct net *net, struct nf_conn *tmpl,
1435                const struct nf_conntrack_tuple *tuple,
1436                struct sk_buff *skb,
1437                unsigned int dataoff, u32 hash)
1438 {
1439         struct nf_conn *ct;
1440         struct nf_conn_help *help;
1441         struct nf_conntrack_tuple repl_tuple;
1442         struct nf_conntrack_ecache *ecache;
1443         struct nf_conntrack_expect *exp = NULL;
1444         const struct nf_conntrack_zone *zone;
1445         struct nf_conn_timeout *timeout_ext;
1446         struct nf_conntrack_zone tmp;
1447
1448         if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
1449                 pr_debug("Can't invert tuple.\n");
1450                 return NULL;
1451         }
1452
1453         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1454         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1455                                   hash);
1456         if (IS_ERR(ct))
1457                 return (struct nf_conntrack_tuple_hash *)ct;
1458
1459         if (!nf_ct_add_synproxy(ct, tmpl)) {
1460                 nf_conntrack_free(ct);
1461                 return ERR_PTR(-ENOMEM);
1462         }
1463
1464         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1465
1466         if (timeout_ext)
1467                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1468                                       GFP_ATOMIC);
1469
1470         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1471         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1472         nf_ct_labels_ext_add(ct);
1473
1474         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1475         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1476                                  ecache ? ecache->expmask : 0,
1477                              GFP_ATOMIC);
1478
1479         local_bh_disable();
1480         if (net->ct.expect_count) {
1481                 spin_lock(&nf_conntrack_expect_lock);
1482                 exp = nf_ct_find_expectation(net, zone, tuple);
1483                 if (exp) {
1484                         pr_debug("expectation arrives ct=%p exp=%p\n",
1485                                  ct, exp);
1486                         /* Welcome, Mr. Bond.  We've been expecting you... */
1487                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1488                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1489                         ct->master = exp->master;
1490                         if (exp->helper) {
1491                                 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1492                                 if (help)
1493                                         rcu_assign_pointer(help->helper, exp->helper);
1494                         }
1495
1496 #ifdef CONFIG_NF_CONNTRACK_MARK
1497                         ct->mark = exp->master->mark;
1498 #endif
1499 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1500                         ct->secmark = exp->master->secmark;
1501 #endif
1502                         NF_CT_STAT_INC(net, expect_new);
1503                 }
1504                 spin_unlock(&nf_conntrack_expect_lock);
1505         }
1506         if (!exp)
1507                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1508
1509         /* It is about to be inserted into the unconfirmed list: bump the refcount */
1510         nf_conntrack_get(&ct->ct_general);
1511         nf_ct_add_to_unconfirmed_list(ct);
1512
1513         local_bh_enable();
1514
1515         if (exp) {
1516                 if (exp->expectfn)
1517                         exp->expectfn(ct, exp);
1518                 nf_ct_expect_put(exp);
1519         }
1520
1521         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1522 }
1523
1524 /* On success, returns 0, sets skb->_nfct | ctinfo */
1525 static int
1526 resolve_normal_ct(struct nf_conn *tmpl,
1527                   struct sk_buff *skb,
1528                   unsigned int dataoff,
1529                   u_int8_t protonum,
1530                   const struct nf_hook_state *state)
1531 {
1532         const struct nf_conntrack_zone *zone;
1533         struct nf_conntrack_tuple tuple;
1534         struct nf_conntrack_tuple_hash *h;
1535         enum ip_conntrack_info ctinfo;
1536         struct nf_conntrack_zone tmp;
1537         struct nf_conn *ct;
1538         u32 hash;
1539
1540         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1541                              dataoff, state->pf, protonum, state->net,
1542                              &tuple)) {
1543                 pr_debug("Can't get tuple\n");
1544                 return 0;
1545         }
1546
1547         /* look for tuple match */
1548         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1549         hash = hash_conntrack_raw(&tuple, state->net);
1550         h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
1551         if (!h) {
1552                 h = init_conntrack(state->net, tmpl, &tuple,
1553                                    skb, dataoff, hash);
1554                 if (!h)
1555                         return 0;
1556                 if (IS_ERR(h))
1557                         return PTR_ERR(h);
1558         }
1559         ct = nf_ct_tuplehash_to_ctrack(h);
1560
1561         /* It exists; we have (non-exclusive) reference. */
1562         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1563                 ctinfo = IP_CT_ESTABLISHED_REPLY;
1564         } else {
1565                 /* Once we've had two way comms, always ESTABLISHED. */
1566                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1567                         pr_debug("normal packet for %p\n", ct);
1568                         ctinfo = IP_CT_ESTABLISHED;
1569                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1570                         pr_debug("related packet for %p\n", ct);
1571                         ctinfo = IP_CT_RELATED;
1572                 } else {
1573                         pr_debug("new packet for %p\n", ct);
1574                         ctinfo = IP_CT_NEW;
1575                 }
1576         }
1577         nf_ct_set(skb, ct, ctinfo);
1578         return 0;
1579 }
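/* Illustrative sketch: after resolve_normal_ct() has run, any later hook can
 * read the association back out of skb->_nfct:
 *
 *	enum ip_conntrack_info ctinfo;
 *	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 *
 *	if (ct && ctinfo == IP_CT_NEW)
 *		...	(first packet of a new original-direction flow)
 */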
1580
1581 /*
1582  * ICMP packets need special treatment to handle error messages that are
1583  * related to an existing connection.
1584  *
1585  * Callers need to check whether the skb has a conntrack assigned when this
1586  * helper returns; if it does, the skb belongs to an already known connection.
1587  */
1588 static unsigned int __cold
1589 nf_conntrack_handle_icmp(struct nf_conn *tmpl,
1590                          struct sk_buff *skb,
1591                          unsigned int dataoff,
1592                          u8 protonum,
1593                          const struct nf_hook_state *state)
1594 {
1595         int ret;
1596
1597         if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
1598                 ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
1599 #if IS_ENABLED(CONFIG_IPV6)
1600         else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
1601                 ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
1602 #endif
1603         else
1604                 return NF_ACCEPT;
1605
1606         if (ret <= 0) {
1607                 NF_CT_STAT_INC_ATOMIC(state->net, error);
1608                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1609         }
1610
1611         return ret;
1612 }
1613
1614 static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
1615                           enum ip_conntrack_info ctinfo)
1616 {
1617         const unsigned int *timeout = nf_ct_timeout_lookup(ct);
1618
1619         if (!timeout)
1620                 timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
1621
1622         nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
1623         return NF_ACCEPT;
1624 }
1625
1626 /* Returns verdict for packet, or -1 for invalid. */
1627 static int nf_conntrack_handle_packet(struct nf_conn *ct,
1628                                       struct sk_buff *skb,
1629                                       unsigned int dataoff,
1630                                       enum ip_conntrack_info ctinfo,
1631                                       const struct nf_hook_state *state)
1632 {
1633         switch (nf_ct_protonum(ct)) {
1634         case IPPROTO_TCP:
1635                 return nf_conntrack_tcp_packet(ct, skb, dataoff,
1636                                                ctinfo, state);
1637         case IPPROTO_UDP:
1638                 return nf_conntrack_udp_packet(ct, skb, dataoff,
1639                                                ctinfo, state);
1640         case IPPROTO_ICMP:
1641                 return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
1642 #if IS_ENABLED(CONFIG_IPV6)
1643         case IPPROTO_ICMPV6:
1644                 return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
1645 #endif
1646 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
1647         case IPPROTO_UDPLITE:
1648                 return nf_conntrack_udplite_packet(ct, skb, dataoff,
1649                                                    ctinfo, state);
1650 #endif
1651 #ifdef CONFIG_NF_CT_PROTO_SCTP
1652         case IPPROTO_SCTP:
1653                 return nf_conntrack_sctp_packet(ct, skb, dataoff,
1654                                                 ctinfo, state);
1655 #endif
1656 #ifdef CONFIG_NF_CT_PROTO_DCCP
1657         case IPPROTO_DCCP:
1658                 return nf_conntrack_dccp_packet(ct, skb, dataoff,
1659                                                 ctinfo, state);
1660 #endif
1661 #ifdef CONFIG_NF_CT_PROTO_GRE
1662         case IPPROTO_GRE:
1663                 return nf_conntrack_gre_packet(ct, skb, dataoff,
1664                                                ctinfo, state);
1665 #endif
1666         }
1667
1668         return generic_packet(ct, skb, ctinfo);
1669 }
1670
1671 unsigned int
1672 nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
1673 {
1674         enum ip_conntrack_info ctinfo;
1675         struct nf_conn *ct, *tmpl;
1676         u_int8_t protonum;
1677         int dataoff, ret;
1678
1679         tmpl = nf_ct_get(skb, &ctinfo);
1680         if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1681                 /* Previously seen (loopback or untracked)?  Ignore. */
1682                 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1683                      ctinfo == IP_CT_UNTRACKED) {
1684                         NF_CT_STAT_INC_ATOMIC(state->net, ignore);
1685                         return NF_ACCEPT;
1686                 }
1687                 skb->_nfct = 0;
1688         }
1689
1690         /* rcu_read_lock()ed by nf_hook_thresh */
1691         dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
1692         if (dataoff <= 0) {
1693                 pr_debug("not prepared to track yet or error occurred\n");
1694                 NF_CT_STAT_INC_ATOMIC(state->net, error);
1695                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1696                 ret = NF_ACCEPT;
1697                 goto out;
1698         }
1699
1700         if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1701                 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1702                                                protonum, state);
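		/* Values <= 0 are negated verdicts (-NF_ACCEPT, -NF_DROP, ...):
		 * the ICMP handler has already decided, so flip the sign and
		 * return the verdict without creating a new conntrack.
		 */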
1703                 if (ret <= 0) {
1704                         ret = -ret;
1705                         goto out;
1706                 }
1707                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1708                 if (skb->_nfct)
1709                         goto out;
1710         }
1711 repeat:
1712         ret = resolve_normal_ct(tmpl, skb, dataoff,
1713                                 protonum, state);
1714         if (ret < 0) {
1715                 /* Too stressed to deal. */
1716                 NF_CT_STAT_INC_ATOMIC(state->net, drop);
1717                 ret = NF_DROP;
1718                 goto out;
1719         }
1720
1721         ct = nf_ct_get(skb, &ctinfo);
1722         if (!ct) {
1723                 /* Not a valid part of a connection */
1724                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1725                 ret = NF_ACCEPT;
1726                 goto out;
1727         }
1728
1729         ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
1730         if (ret <= 0) {
1731                 /* Invalid: inverse of the return code tells
1732                  * the netfilter core what to do */
1733                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1734                 nf_conntrack_put(&ct->ct_general);
1735                 skb->_nfct = 0;
1736                 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1737                 if (ret == -NF_DROP)
1738                         NF_CT_STAT_INC_ATOMIC(state->net, drop);
1739                 /* Special case: TCP tracker reports an attempt to reopen a
1740                  * closed/aborted connection. We have to go back and create a
1741                  * fresh conntrack.
1742                  */
1743                 if (ret == -NF_REPEAT)
1744                         goto repeat;
1745                 ret = -ret;
1746                 goto out;
1747         }
1748
1749         if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1750             !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1751                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1752 out:
1753         if (tmpl)
1754                 nf_ct_put(tmpl);
1755
1756         return ret;
1757 }
1758 EXPORT_SYMBOL_GPL(nf_conntrack_in);
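/* Illustrative sketch: nf_conntrack_in() is not called directly by other
 * subsystems; the per-family hook wrappers registered from
 * nf_conntrack_proto.c reduce to little more than:
 *
 *	static unsigned int ipv4_conntrack_in(void *priv,
 *					      struct sk_buff *skb,
 *					      const struct nf_hook_state *state)
 *	{
 *		return nf_conntrack_in(skb, state);
 *	}
 */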
1759
1760 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1761    implicitly racy: see __nf_conntrack_confirm */
1762 void nf_conntrack_alter_reply(struct nf_conn *ct,
1763                               const struct nf_conntrack_tuple *newreply)
1764 {
1765         struct nf_conn_help *help = nfct_help(ct);
1766
1767         /* Should be unconfirmed, so not in hash table yet */
1768         WARN_ON(nf_ct_is_confirmed(ct));
1769
1770         pr_debug("Altering reply tuple of %p to ", ct);
1771         nf_ct_dump_tuple(newreply);
1772
1773         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1774         if (ct->master || (help && !hlist_empty(&help->expectations)))
1775                 return;
1776
1777         rcu_read_lock();
1778         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1779         rcu_read_unlock();
1780 }
1781 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1782
1783 /* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
1784 void __nf_ct_refresh_acct(struct nf_conn *ct,
1785                           enum ip_conntrack_info ctinfo,
1786                           const struct sk_buff *skb,
1787                           u32 extra_jiffies,
1788                           bool do_acct)
1789 {
1790         /* Only update if this is not a fixed timeout */
1791         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1792                 goto acct;
1793
1794         /* Not yet confirmed: keep the timeout relative; confirmed entries store an absolute expiry */
1795         if (nf_ct_is_confirmed(ct))
1796                 extra_jiffies += nfct_time_stamp;
1797
1798         if (ct->timeout != extra_jiffies)
1799                 ct->timeout = extra_jiffies;
1800 acct:
1801         if (do_acct)
1802                 nf_ct_acct_update(ct, ctinfo, skb->len);
1803 }
1804 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
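/* Note on __nf_ct_refresh_acct() above: once an entry is confirmed,
 * ct->timeout holds an absolute expiry (nfct_time_stamp plus the
 * per-protocol timeout), so expiry testing is just a signed comparison,
 * roughly:
 *
 *	bool expired = (__s32)(ct->timeout - nfct_time_stamp) <= 0;
 *
 * which is what nf_ct_is_expired() does.
 */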
1805
1806 bool nf_ct_kill_acct(struct nf_conn *ct,
1807                      enum ip_conntrack_info ctinfo,
1808                      const struct sk_buff *skb)
1809 {
1810         nf_ct_acct_update(ct, ctinfo, skb->len);
1811
1812         return nf_ct_delete(ct, 0, 0);
1813 }
1814 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1815
1816 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1817
1818 #include <linux/netfilter/nfnetlink.h>
1819 #include <linux/netfilter/nfnetlink_conntrack.h>
1820 #include <linux/mutex.h>
1821
1822 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1823  * in nf_conntrack_core, since we don't want the protocols to autoload
1824  * or to depend on ctnetlink */
1825 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1826                                const struct nf_conntrack_tuple *tuple)
1827 {
1828         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1829             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1830                 goto nla_put_failure;
1831         return 0;
1832
1833 nla_put_failure:
1834         return -1;
1835 }
1836 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1837
1838 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1839         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1840         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1841 };
1842 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1843
1844 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1845                                struct nf_conntrack_tuple *t)
1846 {
1847         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1848                 return -EINVAL;
1849
1850         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1851         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1852
1853         return 0;
1854 }
1855 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1856
1857 unsigned int nf_ct_port_nlattr_tuple_size(void)
1858 {
1859         static unsigned int size __read_mostly;
1860
1861         if (!size)
1862                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1863
1864         return size;
1865 }
1866 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
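/* Illustrative sketch: port-based trackers such as TCP and UDP do not
 * implement these netlink hooks themselves; their struct
 * nf_conntrack_l4proto simply points at the generic helpers above:
 *
 *	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 *	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 *	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 *	.nla_policy		= nf_ct_port_nla_policy,
 */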
1867 #endif
1868
1869 /* Used by ipt_REJECT and ip6t_REJECT. */
1870 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1871 {
1872         struct nf_conn *ct;
1873         enum ip_conntrack_info ctinfo;
1874
1875         /* This ICMP is in the reverse direction to the packet that caused it */
1876         ct = nf_ct_get(skb, &ctinfo);
1877         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1878                 ctinfo = IP_CT_RELATED_REPLY;
1879         else
1880                 ctinfo = IP_CT_RELATED;
1881
1882         /* Attach to new skbuff, and increment count */
1883         nf_ct_set(nskb, ct, ctinfo);
1884         nf_conntrack_get(skb_nfct(nskb));
1885 }
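/* nf_conntrack_attach() above is not exported; it is reached through the
 * ip_ct_attach RCU pointer (set in nf_conntrack_init_end() below), which
 * the REJECT targets call via nf_ct_attach(nskb, oldskb) so that the
 * generated reset or ICMP error keeps the association of the packet that
 * triggered it.
 */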
1886
1887 static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1888 {
1889         struct nf_conntrack_tuple_hash *h;
1890         struct nf_conntrack_tuple tuple;
1891         enum ip_conntrack_info ctinfo;
1892         struct nf_nat_hook *nat_hook;
1893         unsigned int status;
1894         struct nf_conn *ct;
1895         int dataoff;
1896         u16 l3num;
1897         u8 l4num;
1898
1899         ct = nf_ct_get(skb, &ctinfo);
1900         if (!ct || nf_ct_is_confirmed(ct))
1901                 return 0;
1902
1903         l3num = nf_ct_l3num(ct);
1904
1905         dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1906         if (dataoff <= 0)
1907                 return -1;
1908
1909         if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
1910                              l4num, net, &tuple))
1911                 return -1;
1912
1913         if (ct->status & IPS_SRC_NAT) {
1914                 memcpy(tuple.src.u3.all,
1915                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1916                        sizeof(tuple.src.u3.all));
1917                 tuple.src.u.all =
1918                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1919         }
1920
1921         if (ct->status & IPS_DST_NAT) {
1922                 memcpy(tuple.dst.u3.all,
1923                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1924                        sizeof(tuple.dst.u3.all));
1925                 tuple.dst.u.all =
1926                         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1927         }
1928
1929         h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1930         if (!h)
1931                 return 0;
1932
1933         /* Store the status bits of the clashing conntrack so we can re-do the NAT
1934          * mangling according to what has already been done to this packet.
1935          */
1936         status = ct->status;
1937
1938         nf_ct_put(ct);
1939         ct = nf_ct_tuplehash_to_ctrack(h);
1940         nf_ct_set(skb, ct, ctinfo);
1941
1942         nat_hook = rcu_dereference(nf_nat_hook);
1943         if (!nat_hook)
1944                 return 0;
1945
1946         if (status & IPS_SRC_NAT &&
1947             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1948                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1949                 return -1;
1950
1951         if (status & IPS_DST_NAT &&
1952             nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1953                                 IP_CT_DIR_ORIGINAL) == NF_DROP)
1954                 return -1;
1955
1956         return 0;
1957 }
1958
1959 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
1960                                        const struct sk_buff *skb)
1961 {
1962         const struct nf_conntrack_tuple *src_tuple;
1963         const struct nf_conntrack_tuple_hash *hash;
1964         struct nf_conntrack_tuple srctuple;
1965         enum ip_conntrack_info ctinfo;
1966         struct nf_conn *ct;
1967
1968         ct = nf_ct_get(skb, &ctinfo);
1969         if (ct) {
1970                 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
1971                 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1972                 return true;
1973         }
1974
1975         if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
1976                                NFPROTO_IPV4, dev_net(skb->dev),
1977                                &srctuple))
1978                 return false;
1979
1980         hash = nf_conntrack_find_get(dev_net(skb->dev),
1981                                      &nf_ct_zone_dflt,
1982                                      &srctuple);
1983         if (!hash)
1984                 return false;
1985
1986         ct = nf_ct_tuplehash_to_ctrack(hash);
1987         src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
1988         memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1989         nf_ct_put(ct);
1990
1991         return true;
1992 }
1993
1994 /* Bring out ya dead! */
1995 static struct nf_conn *
1996 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1997                 void *data, unsigned int *bucket)
1998 {
1999         struct nf_conntrack_tuple_hash *h;
2000         struct nf_conn *ct;
2001         struct hlist_nulls_node *n;
2002         spinlock_t *lockp;
2003
2004         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
2005                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
2006                 local_bh_disable();
2007                 nf_conntrack_lock(lockp);
2008                 if (*bucket < nf_conntrack_htable_size) {
2009                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
2010                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
2011                                         continue;
2012                                 ct = nf_ct_tuplehash_to_ctrack(h);
2013                                 if (iter(ct, data))
2014                                         goto found;
2015                         }
2016                 }
2017                 spin_unlock(lockp);
2018                 local_bh_enable();
2019                 cond_resched();
2020         }
2021
2022         return NULL;
2023 found:
2024         atomic_inc(&ct->ct_general.use);
2025         spin_unlock(lockp);
2026         local_bh_enable();
2027         return ct;
2028 }
2029
2030 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
2031                                   void *data, u32 portid, int report)
2032 {
2033         unsigned int bucket = 0, sequence;
2034         struct nf_conn *ct;
2035
2036         might_sleep();
2037
2038         for (;;) {
2039                 sequence = read_seqcount_begin(&nf_conntrack_generation);
2040
2041                 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
2042                         /* Time to push up daisies... */
2043
2044                         nf_ct_delete(ct, portid, report);
2045                         nf_ct_put(ct);
2046                         cond_resched();
2047                 }
2048
2049                 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
2050                         break;
2051                 bucket = 0;
2052         }
2053 }
2054
2055 struct iter_data {
2056         int (*iter)(struct nf_conn *i, void *data);
2057         void *data;
2058         struct net *net;
2059 };
2060
2061 static int iter_net_only(struct nf_conn *i, void *data)
2062 {
2063         struct iter_data *d = data;
2064
2065         if (!net_eq(d->net, nf_ct_net(i)))
2066                 return 0;
2067
2068         return d->iter(i, d->data);
2069 }
2070
2071 static void
2072 __nf_ct_unconfirmed_destroy(struct net *net)
2073 {
2074         int cpu;
2075
2076         for_each_possible_cpu(cpu) {
2077                 struct nf_conntrack_tuple_hash *h;
2078                 struct hlist_nulls_node *n;
2079                 struct ct_pcpu *pcpu;
2080
2081                 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2082
2083                 spin_lock_bh(&pcpu->lock);
2084                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
2085                         struct nf_conn *ct;
2086
2087                         ct = nf_ct_tuplehash_to_ctrack(h);
2088
2089                         /* we cannot call iter() on the unconfirmed list; the
2090                          * owning CPU can reallocate ct->ext at any time.
2091                          */
2092                         set_bit(IPS_DYING_BIT, &ct->status);
2093                 }
2094                 spin_unlock_bh(&pcpu->lock);
2095                 cond_resched();
2096         }
2097 }
2098
2099 void nf_ct_unconfirmed_destroy(struct net *net)
2100 {
2101         might_sleep();
2102
2103         if (atomic_read(&net->ct.count) > 0) {
2104                 __nf_ct_unconfirmed_destroy(net);
2105                 nf_queue_nf_hook_drop(net);
2106                 synchronize_net();
2107         }
2108 }
2109 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
2110
2111 void nf_ct_iterate_cleanup_net(struct net *net,
2112                                int (*iter)(struct nf_conn *i, void *data),
2113                                void *data, u32 portid, int report)
2114 {
2115         struct iter_data d;
2116
2117         might_sleep();
2118
2119         if (atomic_read(&net->ct.count) == 0)
2120                 return;
2121
2122         d.iter = iter;
2123         d.data = data;
2124         d.net = net;
2125
2126         nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
2127 }
2128 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
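/* Illustrative sketch (kill_one is a made-up name): evicting every entry of
 * one namespace, much like a full table flush, can be written as:
 *
 *	static int kill_one(struct nf_conn *ct, void *data)
 *	{
 *		return 1;	(evict unconditionally)
 *	}
 *
 *	nf_ct_iterate_cleanup_net(net, kill_one, NULL, portid, report);
 */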
2129
2130 /**
2131  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2132  * @iter: callback to invoke for each conntrack
2133  * @data: data to pass to @iter
2134  *
2135  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2136  * unconfirmed list as dying (so they will not be inserted into
2137  * the main table).
2138  *
2139  * Can only be called from the module exit path.
2140  */
2141 void
2142 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2143 {
2144         struct net *net;
2145
2146         down_read(&net_rwsem);
2147         for_each_net(net) {
2148                 if (atomic_read(&net->ct.count) == 0)
2149                         continue;
2150                 __nf_ct_unconfirmed_destroy(net);
2151                 nf_queue_nf_hook_drop(net);
2152         }
2153         up_read(&net_rwsem);
2154
2155         /* Need to wait for the netns cleanup worker to finish, if it is
2156          * running -- it might have deleted a net namespace from
2157          * the global list, so our __nf_ct_unconfirmed_destroy() might
2158          * not have affected all namespaces.
2159          */
2160         net_ns_barrier();
2161
2162         /* a conntrack could have been unlinked from the unconfirmed list
2163          * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2164          * This makes sure it is inserted into the conntrack table.
2165          */
2166         synchronize_net();
2167
2168         nf_ct_iterate_cleanup(iter, data, 0, 0);
2169 }
2170 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
2171
2172 static int kill_all(struct nf_conn *i, void *data)
2173 {
2174         return net_eq(nf_ct_net(i), data);
2175 }
2176
2177 void nf_conntrack_cleanup_start(void)
2178 {
2179         conntrack_gc_work.exiting = true;
2180         RCU_INIT_POINTER(ip_ct_attach, NULL);
2181 }
2182
2183 void nf_conntrack_cleanup_end(void)
2184 {
2185         RCU_INIT_POINTER(nf_ct_hook, NULL);
2186         cancel_delayed_work_sync(&conntrack_gc_work.dwork);
2187         kvfree(nf_conntrack_hash);
2188
2189         nf_conntrack_proto_fini();
2190         nf_conntrack_seqadj_fini();
2191         nf_conntrack_labels_fini();
2192         nf_conntrack_helper_fini();
2193         nf_conntrack_timeout_fini();
2194         nf_conntrack_ecache_fini();
2195         nf_conntrack_tstamp_fini();
2196         nf_conntrack_acct_fini();
2197         nf_conntrack_expect_fini();
2198
2199         kmem_cache_destroy(nf_conntrack_cachep);
2200 }
2201
2202 /*
2203  * Mishearing the voices in his head, our hero wonders how he's
2204  * supposed to kill the mall.
2205  */
2206 void nf_conntrack_cleanup_net(struct net *net)
2207 {
2208         LIST_HEAD(single);
2209
2210         list_add(&net->exit_list, &single);
2211         nf_conntrack_cleanup_net_list(&single);
2212 }
2213
2214 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2215 {
2216         int busy;
2217         struct net *net;
2218
2219         /*
2220          * This makes sure all current packets have passed through
2221          * the netfilter framework.  Roll on, two-stage module
2222          * delete...
2223          */
2224         synchronize_net();
2225 i_see_dead_people:
2226         busy = 0;
2227         list_for_each_entry(net, net_exit_list, exit_list) {
2228                 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
2229                 if (atomic_read(&net->ct.count) != 0)
2230                         busy = 1;
2231         }
2232         if (busy) {
2233                 schedule();
2234                 goto i_see_dead_people;
2235         }
2236
2237         list_for_each_entry(net, net_exit_list, exit_list) {
2238                 nf_conntrack_proto_pernet_fini(net);
2239                 nf_conntrack_ecache_pernet_fini(net);
2240                 nf_conntrack_expect_pernet_fini(net);
2241                 free_percpu(net->ct.stat);
2242                 free_percpu(net->ct.pcpu_lists);
2243         }
2244 }
2245
2246 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
2247 {
2248         struct hlist_nulls_head *hash;
2249         unsigned int nr_slots, i;
2250
2251         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2252                 return NULL;
2253
2254         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2255         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
2256
2257         hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2258                               GFP_KERNEL | __GFP_ZERO);
2259
2260         if (hash && nulls)
2261                 for (i = 0; i < nr_slots; i++)
2262                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
2263
2264         return hash;
2265 }
2266 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
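/* The per-bucket "nulls" value installed above lets lockless lookups detect
 * that an entry was moved to another chain (e.g. by a resize) while they
 * were walking it: the list end encodes the bucket number, and the reader
 * restarts when it does not match, roughly:
 *
 *	if (get_nulls_value(n) != bucket)
 *		goto begin;	(we ended up on another chain, restart)
 */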
2267
2268 int nf_conntrack_hash_resize(unsigned int hashsize)
2269 {
2270         int i, bucket;
2271         unsigned int old_size;
2272         struct hlist_nulls_head *hash, *old_hash;
2273         struct nf_conntrack_tuple_hash *h;
2274         struct nf_conn *ct;
2275
2276         if (!hashsize)
2277                 return -EINVAL;
2278
2279         hash = nf_ct_alloc_hashtable(&hashsize, 1);
2280         if (!hash)
2281                 return -ENOMEM;
2282
2283         old_size = nf_conntrack_htable_size;
2284         if (old_size == hashsize) {
2285                 kvfree(hash);
2286                 return 0;
2287         }
2288
2289         local_bh_disable();
2290         nf_conntrack_all_lock();
2291         write_seqcount_begin(&nf_conntrack_generation);
2292
2293         /* Lookups in the old hash might happen in parallel, which means we
2294          * might get false negatives during connection lookup. New connections
2295          * created because of a false negative won't make it into the hash
2296          * though, since that requires taking the locks.
2297          */
2298
2299         for (i = 0; i < nf_conntrack_htable_size; i++) {
2300                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2301                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2302                                               struct nf_conntrack_tuple_hash, hnnode);
2303                         ct = nf_ct_tuplehash_to_ctrack(h);
2304                         hlist_nulls_del_rcu(&h->hnnode);
2305                         bucket = __hash_conntrack(nf_ct_net(ct),
2306                                                   &h->tuple, hashsize);
2307                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
2308                 }
2309         }
2310         old_size = nf_conntrack_htable_size;
2311         old_hash = nf_conntrack_hash;
2312
2313         nf_conntrack_hash = hash;
2314         nf_conntrack_htable_size = hashsize;
2315
2316         write_seqcount_end(&nf_conntrack_generation);
2317         nf_conntrack_all_unlock();
2318         local_bh_enable();
2319
2320         synchronize_net();
2321         kvfree(old_hash);
2322         return 0;
2323 }
2324
2325 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2326 {
2327         unsigned int hashsize;
2328         int rc;
2329
2330         if (current->nsproxy->net_ns != &init_net)
2331                 return -EOPNOTSUPP;
2332
2333         /* On boot, we can set this without any fancy locking. */
2334         if (!nf_conntrack_hash)
2335                 return param_set_uint(val, kp);
2336
2337         rc = kstrtouint(val, 0, &hashsize);
2338         if (rc)
2339                 return rc;
2340
2341         return nf_conntrack_hash_resize(hashsize);
2342 }
2343 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
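/* Usage note: this handler is wired up to the nf_conntrack "hashsize"
 * module parameter, so (from the initial netns) the table can be resized
 * at runtime, e.g.:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 */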
2344
2345 static __always_inline unsigned int total_extension_size(void)
2346 {
2347         /* remember to add new extensions below */
2348         BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2349
2350         return sizeof(struct nf_ct_ext) +
2351                sizeof(struct nf_conn_help)
2352 #if IS_ENABLED(CONFIG_NF_NAT)
2353                 + sizeof(struct nf_conn_nat)
2354 #endif
2355                 + sizeof(struct nf_conn_seqadj)
2356                 + sizeof(struct nf_conn_acct)
2357 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2358                 + sizeof(struct nf_conntrack_ecache)
2359 #endif
2360 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2361                 + sizeof(struct nf_conn_tstamp)
2362 #endif
2363 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2364                 + sizeof(struct nf_conn_timeout)
2365 #endif
2366 #ifdef CONFIG_NF_CONNTRACK_LABELS
2367                 + sizeof(struct nf_conn_labels)
2368 #endif
2369 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2370                 + sizeof(struct nf_conn_synproxy)
2371 #endif
2372         ;
2373 };
2374
2375 int nf_conntrack_init_start(void)
2376 {
2377         unsigned long nr_pages = totalram_pages();
2378         int max_factor = 8;
2379         int ret = -ENOMEM;
2380         int i;
2381
2382         /* struct nf_ct_ext uses u8 to store offsets/size */
2383         BUILD_BUG_ON(total_extension_size() > 255u);
2384
2385         seqcount_init(&nf_conntrack_generation);
2386
2387         for (i = 0; i < CONNTRACK_LOCKS; i++)
2388                 spin_lock_init(&nf_conntrack_locks[i]);
2389
2390         if (!nf_conntrack_htable_size) {
2391                 /* Idea from tcp.c: use 1/16384 of memory.
2392                  * On i386: 32MB machine has 512 buckets.
2393                  * >= 1GB machines have 16384 buckets.
2394                  * >= 4GB machines have 65536 buckets.
2395                  */
2396                 nf_conntrack_htable_size
2397                         = (((nr_pages << PAGE_SHIFT) / 16384)
2398                            / sizeof(struct hlist_head));
2399                 if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2400                         nf_conntrack_htable_size = 65536;
2401                 else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2402                         nf_conntrack_htable_size = 16384;
2403                 if (nf_conntrack_htable_size < 32)
2404                         nf_conntrack_htable_size = 32;
2405
2406                 /* Use a max. factor of four by default to get the same max as
2407                  * with the old struct list_heads. When a table size is given
2408                  * we use the old value of 8 to avoid reducing the max.
2409                  * entries. */
2410                 max_factor = 4;
2411         }
2412
2413         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2414         if (!nf_conntrack_hash)
2415                 return -ENOMEM;
2416
2417         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
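	/* Worked example of the sizing above, assuming 4 KiB pages and
	 * 8-byte pointers: a 2 GiB machine gives
	 *	(2 GiB / 16384) / sizeof(struct hlist_head) = 16384 buckets,
	 * which matches the >= 1 GiB clamp, and with the default
	 * max_factor of 4 that yields nf_conntrack_max = 65536.
	 */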
2418
2419         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2420                                                 sizeof(struct nf_conn),
2421                                                 NFCT_INFOMASK + 1,
2422                                                 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2423         if (!nf_conntrack_cachep)
2424                 goto err_cachep;
2425
2426         ret = nf_conntrack_expect_init();
2427         if (ret < 0)
2428                 goto err_expect;
2429
2430         ret = nf_conntrack_acct_init();
2431         if (ret < 0)
2432                 goto err_acct;
2433
2434         ret = nf_conntrack_tstamp_init();
2435         if (ret < 0)
2436                 goto err_tstamp;
2437
2438         ret = nf_conntrack_ecache_init();
2439         if (ret < 0)
2440                 goto err_ecache;
2441
2442         ret = nf_conntrack_timeout_init();
2443         if (ret < 0)
2444                 goto err_timeout;
2445
2446         ret = nf_conntrack_helper_init();
2447         if (ret < 0)
2448                 goto err_helper;
2449
2450         ret = nf_conntrack_labels_init();
2451         if (ret < 0)
2452                 goto err_labels;
2453
2454         ret = nf_conntrack_seqadj_init();
2455         if (ret < 0)
2456                 goto err_seqadj;
2457
2458         ret = nf_conntrack_proto_init();
2459         if (ret < 0)
2460                 goto err_proto;
2461
2462         conntrack_gc_work_init(&conntrack_gc_work);
2463         queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2464
2465         return 0;
2466
2467 err_proto:
2468         nf_conntrack_seqadj_fini();
2469 err_seqadj:
2470         nf_conntrack_labels_fini();
2471 err_labels:
2472         nf_conntrack_helper_fini();
2473 err_helper:
2474         nf_conntrack_timeout_fini();
2475 err_timeout:
2476         nf_conntrack_ecache_fini();
2477 err_ecache:
2478         nf_conntrack_tstamp_fini();
2479 err_tstamp:
2480         nf_conntrack_acct_fini();
2481 err_acct:
2482         nf_conntrack_expect_fini();
2483 err_expect:
2484         kmem_cache_destroy(nf_conntrack_cachep);
2485 err_cachep:
2486         kvfree(nf_conntrack_hash);
2487         return ret;
2488 }
2489
2490 static struct nf_ct_hook nf_conntrack_hook = {
2491         .update         = nf_conntrack_update,
2492         .destroy        = destroy_conntrack,
2493         .get_tuple_skb  = nf_conntrack_get_tuple_skb,
2494 };
2495
2496 void nf_conntrack_init_end(void)
2497 {
2498         /* For use by REJECT target */
2499         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2500         RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
2501 }
2502
2503 /*
2504  * We need to use special "null" values, not used in the hash table
2505  */
2506 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
2507 #define DYING_NULLS_VAL         ((1<<30)+1)
2508 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
2509
2510 int nf_conntrack_init_net(struct net *net)
2511 {
2512         int ret = -ENOMEM;
2513         int cpu;
2514
2515         BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2516         BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
2517         atomic_set(&net->ct.count, 0);
2518
2519         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2520         if (!net->ct.pcpu_lists)
2521                 goto err_stat;
2522
2523         for_each_possible_cpu(cpu) {
2524                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2525
2526                 spin_lock_init(&pcpu->lock);
2527                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2528                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2529         }
2530
2531         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2532         if (!net->ct.stat)
2533                 goto err_pcpu_lists;
2534
2535         ret = nf_conntrack_expect_pernet_init(net);
2536         if (ret < 0)
2537                 goto err_expect;
2538
2539         nf_conntrack_acct_pernet_init(net);
2540         nf_conntrack_tstamp_pernet_init(net);
2541         nf_conntrack_ecache_pernet_init(net);
2542         nf_conntrack_helper_pernet_init(net);
2543         nf_conntrack_proto_pernet_init(net);
2544
2545         return 0;
2546
2547 err_expect:
2548         free_percpu(net->ct.stat);
2549 err_pcpu_lists:
2550         free_percpu(net->ct.pcpu_lists);
2551 err_stat:
2552         return ret;
2553 }