// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     last_bucket;
        bool                    exiting;
        bool                    early_drop;
        long                    next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV      128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO  50u

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        /* 1) Acquire the lock */
        spin_lock(lock);

        /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
         * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
         */
        if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
                return;

        /* fast path failed, unlock */
        spin_unlock(lock);

        /* Slow path 1) get global lock */
        spin_lock(&nf_conntrack_locks_all_lock);

        /* Slow path 2) get the lock we want */
        spin_lock(lock);

        /* Slow path 3) release the global lock */
        spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}
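
/* The two bucket locks are always taken in ascending index order
 * (h1 <= h2 first), so two CPUs locking the same pair from opposite
 * ends cannot deadlock; SINGLE_DEPTH_NESTING tells lockdep the second
 * acquisition is intentional.  The seqcount retry catches a concurrent
 * table resize that would have made the caller's bucket hashes stale.
 */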

static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);

        nf_conntrack_locks_all = true;

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_lock(&nf_conntrack_locks[i]);

                /* This spin_unlock provides the "release" to ensure that
                 * nf_conntrack_locks_all==true is visible to everyone that
                 * acquired spin_lock(&nf_conntrack_locks[]).
                 */
                spin_unlock(&nf_conntrack_locks[i]);
        }
}

static void nf_conntrack_all_unlock(void)
{
        /* All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
         * critical section.
         * It pairs with the smp_load_acquire() in nf_conntrack_lock()
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              const struct net *net)
{
        unsigned int n;
        u32 seed;

        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

static u32 scale_hash(u32 hash)
{
        return reciprocal_scale(hash, nf_conntrack_htable_size);
}

static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
                            unsigned int size)
{
        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
                          const struct nf_conntrack_tuple *tuple)
{
        return scale_hash(hash_conntrack_raw(tuple, net));
}
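
/* reciprocal_scale() maps a 32-bit hash onto [0, size) with a multiply
 * and a shift instead of a modulo, roughly:
 *
 *	bucket = ((u64)hash * size) >> 32;
 *
 * which is cheaper on the packet path and works for any table size,
 * not just powers of two.
 */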

static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
                                  unsigned int dataoff,
                                  struct nf_conntrack_tuple *tuple)
{
        struct {
                __be16 sport;
                __be16 dport;
        } _inet_hdr, *inet_hdr;

        /* Actually only need first 4 bytes to get ports. */
        inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
        if (!inet_hdr)
                return false;

        tuple->src.u.udp.port = inet_hdr->sport;
        tuple->dst.u.udp.port = inet_hdr->dport;
        return true;
}
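
/* TCP, UDP, UDPLITE, SCTP and DCCP all begin their headers with a
 * 16-bit source port followed by a 16-bit destination port, so this
 * one helper covers every port-based protocol dispatched from
 * nf_ct_get_tuple() below.
 */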

static bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple)
{
        unsigned int size;
        const __be32 *ap;
        __be32 _addrs[8];

        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        switch (l3num) {
        case NFPROTO_IPV4:
                nhoff += offsetof(struct iphdr, saddr);
                size = 2 * sizeof(__be32);
                break;
        case NFPROTO_IPV6:
                nhoff += offsetof(struct ipv6hdr, saddr);
                size = sizeof(_addrs);
                break;
        default:
                return true;
        }

        ap = skb_header_pointer(skb, nhoff, size, _addrs);
        if (!ap)
                return false;

        switch (l3num) {
        case NFPROTO_IPV4:
                tuple->src.u3.ip = ap[0];
                tuple->dst.u3.ip = ap[1];
                break;
        case NFPROTO_IPV6:
                memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
                memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
                break;
        }

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        switch (protonum) {
#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_ICMPV6:
                return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
        case IPPROTO_ICMP:
                return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
        case IPPROTO_GRE:
                return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
        case IPPROTO_TCP:
        case IPPROTO_UDP: /* fallthrough */
                return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
        case IPPROTO_UDPLITE:
                return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
        case IPPROTO_SCTP:
                return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
        case IPPROTO_DCCP:
                return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
        default:
                break;
        }

        return true;
}

static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            u_int8_t *protonum)
{
        int dataoff = -1;
        const struct iphdr *iph;
        struct iphdr _iph;

        iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
        if (!iph)
                return -1;

        /* Conntrack defragments packets; we might still see fragments
         * inside ICMP packets, though.
         */
        if (iph->frag_off & htons(IP_OFFSET))
                return -1;

        dataoff = nhoff + (iph->ihl << 2);
        *protonum = iph->protocol;

        /* Check bogus IP headers */
        if (dataoff > skb->len) {
                pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
                         nhoff, iph->ihl << 2, skb->len);
                return -1;
        }
        return dataoff;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            u8 *protonum)
{
        int protoff = -1;
        unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
        __be16 frag_off;
        u8 nexthdr;

        if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
                          &nexthdr, sizeof(nexthdr)) != 0) {
                pr_debug("can't get nexthdr\n");
                return -1;
        }
        protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
        /*
         * (protoff == skb->len) means the packet has no data, just
         * the IPv6 header and possibly extension headers, but it is
         * tracked anyway.
         */
        if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("can't find proto in pkt\n");
                return -1;
        }

        *protonum = nexthdr;
        return protoff;
}
#endif

static int get_l4proto(const struct sk_buff *skb,
                       unsigned int nhoff, u8 pf, u8 *l4num)
{
        switch (pf) {
        case NFPROTO_IPV4:
                return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6:
                return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
        default:
                *l4num = 0;
                break;
        }
        return -1;
}

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        u8 protonum;
        int protoff;

        protoff = get_l4proto(skb, nhoff, l3num, &protonum);
        if (protoff <= 0)
                return false;

        return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
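
/* Typical use (illustrative sketch only): extract the flow tuple of an
 * IPv4 skb whose network header starts at skb_network_offset():
 *
 *	struct nf_conntrack_tuple tuple;
 *
 *	if (nf_ct_get_tuplepr(skb, skb_network_offset(skb),
 *			      NFPROTO_IPV4, dev_net(skb->dev), &tuple))
 *		pr_debug("sport %u\n", ntohs(tuple.src.u.tcp.port));
 */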

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;

        switch (orig->src.l3num) {
        case NFPROTO_IPV4:
                inverse->src.u3.ip = orig->dst.u3.ip;
                inverse->dst.u3.ip = orig->src.u3.ip;
                break;
        case NFPROTO_IPV6:
                inverse->src.u3.in6 = orig->dst.u3.in6;
                inverse->dst.u3.in6 = orig->src.u3.in6;
                break;
        default:
                break;
        }

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;

        switch (orig->dst.protonum) {
        case IPPROTO_ICMP:
                return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_ICMPV6:
                return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
        }

        inverse->src.u.all = orig->dst.u.all;
        inverse->dst.u.all = orig->src.u.all;
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
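
/* For port-based protocols the inverse is a pure swap: the reply
 * direction of 10.0.0.1:1024 -> 10.0.0.2:80/tcp is
 * 10.0.0.2:80 -> 10.0.0.1:1024/tcp.  ICMP and ICMPv6 take the helper
 * path above because an echo request is answered by an echo *reply*
 * carrying the same id, not by another request.
 */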

/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * Intentionally doesn't re-use any of the seeds used for hash
 * table location; we assume the id gets exposed to userspace.
 *
 * The following nf_conn items do not change throughout the lifetime
 * of the nf_conn after it has been committed to the main hash table:
 *
 * 1. nf_conn address
 * 2. nf_conn->ext address
 * 3. nf_conn->master address (normally NULL)
 * 4. tuple
 * 5. the associated net namespace
 */
u32 nf_ct_get_id(const struct nf_conn *ct)
{
        static __read_mostly siphash_key_t ct_id_seed;
        unsigned long a, b, c, d;

        net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));

        a = (unsigned long)ct;
        b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
        c = (unsigned long)ct->ext;
        d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
                                   &ct_id_seed);
#ifdef CONFIG_64BIT
        return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
        return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);
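
/* Using a keyed hash (siphash) over fields that never change for a
 * committed entry makes the id stable for the conntrack's lifetime,
 * while a userspace observer cannot derive kernel addresses or the
 * hash seeds from the ids it sees.
 */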

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload the first tuple to link into the unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl, *p;

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
                tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
                if (!tmpl)
                        return NULL;

                p = tmpl;
                tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                if (tmpl != p) {
                        tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                        tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
                }
        } else {
                tmpl = kzalloc(sizeof(*tmpl), flags);
                if (!tmpl)
                        return NULL;
        }

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
        nf_ct_zone_add(tmpl, zone);
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
                kfree((char *)tmpl - tmpl->proto.tmpl_padto);
        else
                kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
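
/* Why the alignment dance in nf_ct_tmpl_alloc(): skb->_nfct stores the
 * ctinfo bits in the low bits of the conntrack pointer (NFCT_INFOMASK),
 * so every nf_conn must be aligned to NFCT_INFOMASK + 1 bytes.  When
 * kzalloc() cannot guarantee that, the template is over-allocated,
 * rounded up with NFCT_ALIGN(), and the offset is remembered in
 * tmpl_padto so nf_ct_tmpl_free() can hand the original pointer back
 * to kfree().
 */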

static void destroy_gre_conntrack(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
        struct nf_conn *master = ct->master;

        if (master)
                nf_ct_gre_keymap_destroy(master);
#endif
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;

        pr_debug("destroy_conntrack(%p)\n", ct);
        WARN_ON(atomic_read(&nfct->use) != 0);

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }

        if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
                destroy_gre_conntrack(ct);

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before the connection is in the list, so we need to clean
         * here, too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
                return false;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
                /* destroy event was not delivered. nf_ct_put will
                 * be done by event cache worker on redelivery.
                 */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with an equal tuple,
         * so we need to check that the conntrack is confirmed.
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct) &&
               net_eq(net, nf_ct_net(ct));
}

static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
        return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
               nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
                                 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
               nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
               nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
               net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
        if (!atomic_inc_not_zero(&ct->ct_general.use))
                return;

        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);

        nf_ct_put(ct);
}

/*
 * Warning:
 * - Caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int bucket, hsize;

begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        bucket = reciprocal_scale(hash, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
                struct nf_conn *ct;

                ct = nf_ct_tuplehash_to_ctrack(h);
                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_is_dying(ct))
                        continue;

                if (nf_ct_key_equal(h, tuple, zone, net))
                        return h;
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart the lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        return NULL;
}
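
/* The lookup runs locklessly under RCU over an hlist_nulls chain: the
 * list terminator encodes the bucket number, so if an entry was freed
 * and reused on a different chain while we walked (possible with
 * SLAB_TYPESAFE_BY_RCU), we end on a nulls value that does not match
 * our bucket and simply restart.
 */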

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
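
/* Caller-side pattern (illustrative sketch only): the entry comes back
 * with its refcount raised, so the caller owns a reference and must
 * drop it when done:
 *
 *	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		...
 *		nf_ct_put(ct);
 *	}
 */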

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &nf_conntrack_hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
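
/* The refcount is set to 2 on success: one reference is owned by the
 * hash table, the other stays with the caller.  The smp_wmb() makes
 * the entry's earlier initialisation visible before the refcount store
 * and the list insertion, so a lockless reader never finds a hashed
 * but half-initialised entry.
 */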

static inline void nf_ct_acct_update(struct nf_conn *ct,
                                     enum ip_conntrack_info ctinfo,
                                     unsigned int len)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;

                atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
        }
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
                             const struct nf_conn *loser_ct)
{
        struct nf_conn_acct *acct;

        acct = nf_conn_acct_find(loser_ct);
        if (acct) {
                struct nf_conn_counter *counter = acct->counter;
                unsigned int bytes;

                /* u32 should be fine since we must have seen one packet. */
                bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
                nf_ct_acct_update(ct, ctinfo, bytes);
        }
}

/* Resolve a race on insertion if this protocol allows it. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
                               enum ip_conntrack_info ctinfo,
                               struct nf_conntrack_tuple_hash *h)
{
        /* This is the conntrack entry already in hashes that won the race. */
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
        const struct nf_conntrack_l4proto *l4proto;
        enum ip_conntrack_info oldinfo;
        struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

        l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
                    nf_ct_match(ct, loser_ct)) {
                        nf_ct_acct_merge(ct, ctinfo, loser_ct);
                        nf_conntrack_put(&loser_ct->ct_general);
                        nf_ct_set(skb, ct, oldinfo);
                        return NF_ACCEPT;
                }
                nf_ct_put(ct);
        }
        NF_CT_STAT_INC(net, drop);
        return NF_DROP;
}
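
/* Clash resolution only makes sense for connectionless protocols
 * (those that set allow_clash): two packets of the same flow raced to
 * create an entry, so the loser's skb is re-pointed at the entry that
 * won and its accounting is merged.  If NAT already rewrote the loser
 * differently, the packet has to be dropped instead.
 */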

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;
        int ret = NF_DROP;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual
           packet which created the connection will be IP_CT_NEW, or
           for an expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = scale_hash(hash);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */

        /* Another skb with the same unconfirmed conntrack may
         * win the race. This may happen for bridge (br_flood)
         * or broadcast/multicast packets that are skb_cloned
         * while the conntrack is still unconfirmed.
         */
        if (unlikely(nf_ct_is_confirmed(ct))) {
                WARN_ON_ONCE(1);
                nf_conntrack_double_unlock(hash, reply_hash);
                local_bh_enable();
                return NF_DROP;
        }

        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct))) {
                nf_ct_add_to_dying_list(ct);
                goto dying;
        }

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        /* Timer is relative to confirmation time, not the original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout += nfct_time_stamp;
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp)
                tstamp->start = ktime_get_real_ns();

        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        unsigned int hash, hsize;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;

        zone = nf_ct_zone(ignored_conntrack);

        rcu_read_lock();
 begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        hash = __hash_conntrack(net, tuple, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);

                if (ct == ignored_conntrack)
                        continue;

                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_key_equal(h, tuple, zone, net)) {
                        /* Tuple is taken already, so caller will need to find
                         * a new source port to use.
                         *
                         * Only exception:
                         * If the *original tuples* are identical, then both
                         * conntracks refer to the same flow.
                         * This is a rare situation, it can occur e.g. when
                         * more than one UDP packet is sent from the same
                         * socket in different threads.
                         *
                         * Let nf_ct_resolve_clash() deal with this later.
                         */
                        if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                              &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
                                continue;

                        NF_CT_STAT_INC_ATOMIC(net, found);
                        rcu_read_unlock();
                        return 1;
                }
        }

        if (get_nulls_value(n) != hash) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        rcu_read_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
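
/* NAT is the main user of this helper: when picking a source port for
 * a rewritten flow, it proposes candidate reply tuples and probes them
 * here until it finds one no other conntrack already claims.
 */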

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
                                    struct hlist_nulls_head *head)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int drops = 0;
        struct nf_conn *tmp;

        hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
                tmp = nf_ct_tuplehash_to_ctrack(h);

                if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
                        continue;

                if (nf_ct_is_expired(tmp)) {
                        nf_ct_gc_expired(tmp);
                        continue;
                }

                if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
                    !net_eq(nf_ct_net(tmp), net) ||
                    nf_ct_is_dying(tmp))
                        continue;

                if (!atomic_inc_not_zero(&tmp->ct_general.use))
                        continue;

                /* kill only if still in same netns -- might have moved due to
                 * SLAB_TYPESAFE_BY_RCU rules.
                 *
                 * We steal the timer reference.  If that fails, the timer has
                 * already fired or someone else deleted it. Just drop the ref
                 * and move to the next entry.
                 */
                if (net_eq(nf_ct_net(tmp), net) &&
                    nf_ct_is_confirmed(tmp) &&
                    nf_ct_delete(tmp, 0, 0))
                        drops++;

                nf_ct_put(tmp);
        }

        return drops;
}

static noinline int early_drop(struct net *net, unsigned int hash)
{
        unsigned int i, bucket;

        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
                unsigned int hsize, drops;

                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
                if (!i)
                        bucket = reciprocal_scale(hash, hsize);
                else
                        bucket = (bucket + 1) % hsize;

                drops = early_drop_list(net, &ct_hash[bucket]);
                rcu_read_unlock();

                if (drops) {
                        NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
                        return true;
                }
        }

        return false;
}
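
/* When the table is full, up to NF_CT_EVICTION_RANGE (8) consecutive
 * buckets starting at the new entry's own bucket are scanned for
 * non-ASSURED victims; the walk stops at the first bucket that yields
 * at least one drop, so the common case touches very few chains.
 */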

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
        return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;

        if (!test_bit(IPS_ASSURED_BIT, &ct->status))
                return true;

        l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
        if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
                return true;

        return false;
}

#define DAY     (86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire; this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static void nf_ct_offload_timeout(struct nf_conn *ct)
{
        if (nf_ct_expires(ct) < DAY / 2)
                ct->timeout = nfct_time_stamp + DAY;
}

static void gc_worker(struct work_struct *work)
{
        unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
        unsigned int i, goal, buckets = 0, expired_count = 0;
        unsigned int nf_conntrack_max95 = 0;
        struct conntrack_gc_work *gc_work;
        unsigned int ratio, scanned = 0;
        unsigned long next_run;

        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

        goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
        i = gc_work->last_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

        do {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
                unsigned int hashsz;
                struct nf_conn *tmp;

                i++;
                rcu_read_lock();

                nf_conntrack_get_ht(&ct_hash, &hashsz);
                if (i >= hashsz)
                        i = 0;

                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct net *net;

                        tmp = nf_ct_tuplehash_to_ctrack(h);

                        scanned++;
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
                        }

                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
                                expired_count++;
                                continue;
                        }

                        if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
                                continue;

                        net = nf_ct_net(tmp);
                        if (atomic_read(&net->ct.count) < nf_conntrack_max95)
                                continue;

                        /* need to take reference to avoid possible races */
                        if (!atomic_inc_not_zero(&tmp->ct_general.use))
                                continue;

                        if (gc_worker_skip_ct(tmp)) {
                                nf_ct_put(tmp);
                                continue;
                        }

                        if (gc_worker_can_early_drop(tmp))
                                nf_ct_kill(tmp);

                        nf_ct_put(tmp);
                }

                /* could check get_nulls_value() here and restart if ct
                 * was moved to another chain.  But given that gc is
                 * best-effort, we will just continue with the next hash slot.
                 */
                rcu_read_unlock();
                cond_resched();
        } while (++buckets < goal);

        if (gc_work->exiting)
                return;

        /*
         * Eviction will normally happen from the packet path, and not
         * from this gc worker.
         *
         * This worker is only here to reap expired entries when the system
         * goes idle after a busy period.
         *
         * The heuristics below are supposed to balance conflicting goals:
         *
         * 1. Minimize time until we notice a stale entry
         * 2. Maximize scan intervals to not waste cycles
         *
         * Normally, the expire ratio will be close to 0.
         *
         * As soon as a sizeable fraction of the entries has expired,
         * increase the scan frequency.
         */
        ratio = scanned ? expired_count * 100 / scanned : 0;
        if (ratio > GC_EVICT_RATIO) {
                gc_work->next_gc_run = min_interval;
        } else {
                unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

                BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

                gc_work->next_gc_run += min_interval;
                if (gc_work->next_gc_run > max)
                        gc_work->next_gc_run = max;
        }

        next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        gc_work->early_drop = false;
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
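
/* Worked example of the cadence these constants give: each run scans
 * at most 1/128th of the table (GC_MAX_BUCKETS_DIV), and the inter-run
 * delay is clamped to GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV, i.e.
 * 16*HZ / 128 = HZ/8 jiffies.  A full sweep therefore takes 128 runs,
 * or at most 128 * HZ/8 = 16*HZ jiffies -- exactly the documented
 * upper bound of a full table scan.  When more than GC_EVICT_RATIO
 * (50%) of the scanned entries turn out to be expired, the delay
 * collapses to roughly HZ/128 so stale entries are reaped quickly.
 */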

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
        INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
        gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        /* We don't want any race condition at the early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        if (!conntrack_gc_work.early_drop)
                                conntrack_gc_work.early_drop = true;
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_TYPESAFE_BY_RCU.
         */
        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;

        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        ct->timeout = 0;
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));

        nf_ct_zone_add(ct, zone);

        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0; that's
         * the golden rule for SLAB_TYPESAFE_BY_RCU.
         */
        WARN_ON(atomic_read(&ct->ct_general.use) != 0);

        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
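
/* SLAB_TYPESAFE_BY_RCU means a freed nf_conn can be reallocated as a
 * new nf_conn before concurrent RCU readers are done with it -- only
 * the *type* is stable, not the identity.  That is why lookups must
 * take the refcount with atomic_inc_not_zero() and re-run
 * nf_ct_key_equal() afterwards, and why freed objects must keep
 * refcnt == 0 as asserted above.
 */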

/* Allocate a new conntrack: we return ERR_PTR(-ENOMEM) if allocation
   failed due to stress; a NULL return means the packet is genuinely
   untrackable (its tuple cannot be inverted). */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
        struct nf_conntrack_zone tmp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (!nf_ct_add_synproxy(ct, tmpl)) {
                nf_conntrack_free(ct);
                return ERR_PTR(-ENOMEM);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
                                      GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                             ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp)
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns 0 and sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int8_t protonum,
                  const struct nf_hook_state *state)
{
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, state->pf, protonum, state->net,
                             &tuple)) {
                pr_debug("Can't get tuple\n");
                return 0;
        }

        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        hash = hash_conntrack_raw(&tuple, state->net);
        h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(state->net, tmpl, &tuple,
                                   skb, dataoff, hash);
                if (!h)
                        return 0;
                if (IS_ERR(h))
                        return PTR_ERR(h);
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                ctinfo = IP_CT_ESTABLISHED_REPLY;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("normal packet for %p\n", ct);
                        ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("related packet for %p\n", ct);
                        ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("new packet for %p\n", ct);
                        ctinfo = IP_CT_NEW;
                }
        }
        nf_ct_set(skb, ct, ctinfo);
        return 0;
}
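
/* Editorial note: an illustrative sketch, not part of the original file.
 * After resolve_normal_ct() returns 0, callers recover the entry and its
 * state from the skb; the ctinfo values set above map to flow state like so:
 */
#if 0
        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                switch (ctinfo) {
                case IP_CT_NEW:                 /* first packet of a new flow */
                case IP_CT_RELATED:             /* flow matched an expectation */
                case IP_CT_ESTABLISHED:         /* reply traffic already seen */
                case IP_CT_ESTABLISHED_REPLY:   /* packet in the reply direction */
                default:
                        break;
                }
        }
#endif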

/*
 * ICMP packets need special treatment to handle error messages that are
 * related to a connection.
 *
 * Callers need to check if skb has a conntrack assigned when this
 * helper returns; in that case the skb belongs to an already known connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
                         struct sk_buff *skb,
                         unsigned int dataoff,
                         u8 protonum,
                         const struct nf_hook_state *state)
{
        int ret;

        if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
                ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
        else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
                ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
        else
                return NF_ACCEPT;

        if (ret <= 0) {
                NF_CT_STAT_INC_ATOMIC(state->net, error);
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
        }

        return ret;
}
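
/* Editorial note: an illustrative caller sketch, not part of the original
 * file; it mirrors the way nf_conntrack_in() below consumes this helper.
 * A non-positive return encodes a negated verdict, and a set skb->_nfct
 * means the ICMP error was matched to an already known connection:
 */
#if 0
        ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff, protonum, state);
        if (ret <= 0)
                return -ret;            /* negated verdict, e.g. -NF_ACCEPT */
        if (skb->_nfct)
                return NF_ACCEPT;       /* already attached, stop tracking */
#endif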

static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
                          enum ip_conntrack_info ctinfo)
{
        const unsigned int *timeout = nf_ct_timeout_lookup(ct);

        if (!timeout)
                timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;

        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
}

/* Returns the verdict for the packet; a return <= 0 means the packet is
 * invalid and carries the negated verdict. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
                                      struct sk_buff *skb,
                                      unsigned int dataoff,
                                      enum ip_conntrack_info ctinfo,
                                      const struct nf_hook_state *state)
{
        switch (nf_ct_protonum(ct)) {
        case IPPROTO_TCP:
                return nf_conntrack_tcp_packet(ct, skb, dataoff,
                                               ctinfo, state);
        case IPPROTO_UDP:
                return nf_conntrack_udp_packet(ct, skb, dataoff,
                                               ctinfo, state);
        case IPPROTO_ICMP:
                return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_ICMPV6:
                return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
        case IPPROTO_UDPLITE:
                return nf_conntrack_udplite_packet(ct, skb, dataoff,
                                                   ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
        case IPPROTO_SCTP:
                return nf_conntrack_sctp_packet(ct, skb, dataoff,
                                                ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
        case IPPROTO_DCCP:
                return nf_conntrack_dccp_packet(ct, skb, dataoff,
                                                ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
        case IPPROTO_GRE:
                return nf_conntrack_gre_packet(ct, skb, dataoff,
                                               ctinfo, state);
#endif
        }

        return generic_packet(ct, skb, ctinfo);
}

unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct, *tmpl;
        u_int8_t protonum;
        int dataoff, ret;

        tmpl = nf_ct_get(skb, &ctinfo);
        if (tmpl || ctinfo == IP_CT_UNTRACKED) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                if ((tmpl && !nf_ct_is_template(tmpl)) ||
                     ctinfo == IP_CT_UNTRACKED) {
                        NF_CT_STAT_INC_ATOMIC(state->net, ignore);
                        return NF_ACCEPT;
                }
                skb->_nfct = 0;
        }

        /* rcu_read_lock()ed by nf_hook_thresh */
        dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
        if (dataoff <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(state->net, error);
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
                ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
                                               protonum, state);
                if (ret <= 0) {
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->_nfct)
                        goto out;
        }
repeat:
        ret = resolve_normal_ct(tmpl, skb, dataoff,
                                protonum, state);
        if (ret < 0) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(state->net, drop);
                ret = NF_DROP;
                goto out;
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct) {
                /* Not a valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(&ct->ct_general);
                skb->_nfct = 0;
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(state->net, drop);
                /* Special case: TCP tracker reports an attempt to reopen a
                 * closed/aborted connection. We have to go back and create a
                 * fresh conntrack.
                 */
                if (ret == -NF_REPEAT)
                        goto repeat;
                ret = -ret;
                goto out;
        }

        if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
            !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl)
                nf_ct_put(tmpl);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
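
/* Editorial note: an illustrative sketch with hypothetical names
 * (example_*), not part of the original file.  The in-tree IPv4/IPv6
 * trackers register nf_conntrack_in() as a netfilter hook in roughly
 * this shape:
 */
#if 0
static unsigned int example_conntrack_in(void *priv, struct sk_buff *skb,
                                         const struct nf_hook_state *state)
{
        return nf_conntrack_in(skb, state);
}

static const struct nf_hook_ops example_conntrack_ops = {
        .hook           = example_conntrack_in,
        .pf             = NFPROTO_IPV4,
        .hooknum        = NF_INET_PRE_ROUTING,
        .priority       = NF_IP_PRI_CONNTRACK,
};
#endif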

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        WARN_ON(nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
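
/* Editorial note: an illustrative sketch, not part of the original file.
 * A NAT setup path computes the mangled tuple, inverts it and installs the
 * result as the new reply tuple while the entry is still unconfirmed
 * (new_tuple here is a hypothetical, already NAT-mangled tuple):
 */
#if 0
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuple(&reply, &new_tuple);
        nf_conntrack_alter_reply(ct, &reply);
#endif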

/* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          u32 extra_jiffies,
                          bool do_acct)
{
        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (nf_ct_is_confirmed(ct))
                extra_jiffies += nfct_time_stamp;

        if (ct->timeout != extra_jiffies)
                ct->timeout = extra_jiffies;
acct:
        if (do_acct)
                nf_ct_acct_update(ct, ctinfo, skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
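
/* Editorial note: an illustrative sketch, not part of the original file.
 * Protocol trackers normally call this through the nf_ct_refresh_acct()
 * wrapper (as generic_packet() above does), passing a relative timeout:
 */
#if 0
        nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);   /* keep flow for 30s */
#endif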

bool nf_ct_kill_acct(struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     const struct sk_buff *skb)
{
        nf_ct_acct_update(ct, ctinfo, skb->len);

        return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
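
/* Editorial note: an illustrative sketch, not part of the original file.
 * A tracker that decides a flow is finished accounts the final packet and
 * tears the entry down in one step (tcp_rst_seen is hypothetical):
 */
#if 0
        if (tcp_rst_seen)
                nf_ct_kill_acct(ct, ctinfo, skb);
#endif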

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in nf_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
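
/* Editorial note: an illustrative sketch, not part of the original file.
 * Round-tripping the port attributes with the two helpers above; tb[] is
 * assumed to have been filled by a netlink parse against
 * nf_ct_port_nla_policy:
 */
#if 0
        if (nf_ct_port_tuple_to_nlattr(skb, &tuple) < 0)        /* emit */
                goto nla_put_failure;

        if (nf_ct_port_nlattr_to_tuple(tb, &tuple) < 0)         /* parse */
                return -EINVAL;
#endif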

unsigned int nf_ct_port_nlattr_tuple_size(void)
{
        static unsigned int size __read_mostly;

        if (!size)
                size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

        return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nf_ct_set(nskb, ct, ctinfo);
        nf_conntrack_get(skb_nfct(nskb));
}

static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
        enum ip_conntrack_info ctinfo;
        struct nf_nat_hook *nat_hook;
        unsigned int status;
        struct nf_conn *ct;
        int dataoff;
        u16 l3num;
        u8 l4num;

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct || nf_ct_is_confirmed(ct))
                return 0;

        l3num = nf_ct_l3num(ct);

        dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
        if (dataoff <= 0)
                return -1;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
                             l4num, net, &tuple))
                return -1;

        if (ct->status & IPS_SRC_NAT) {
                memcpy(tuple.src.u3.all,
                       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
                       sizeof(tuple.src.u3.all));
                tuple.src.u.all =
                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
        }

        if (ct->status & IPS_DST_NAT) {
                memcpy(tuple.dst.u3.all,
                       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
                       sizeof(tuple.dst.u3.all));
                tuple.dst.u.all =
                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
        }

        h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
        if (!h)
                return 0;

        /* Store status bits of the conntrack that is clashing to re-do NAT
         * mangling according to what has already been done to this packet.
         */
        status = ct->status;

        nf_ct_put(ct);
        ct = nf_ct_tuplehash_to_ctrack(h);
        nf_ct_set(skb, ct, ctinfo);

        nat_hook = rcu_dereference(nf_nat_hook);
        if (!nat_hook)
                return 0;

        if (status & IPS_SRC_NAT &&
            nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
                                IP_CT_DIR_ORIGINAL) == NF_DROP)
                return -1;

        if (status & IPS_DST_NAT &&
            nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
                                IP_CT_DIR_ORIGINAL) == NF_DROP)
                return -1;

        return 0;
}

static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
{
        const struct nf_conntrack_tuple *src_tuple;
        const struct nf_conntrack_tuple_hash *hash;
        struct nf_conntrack_tuple srctuple;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
                memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
                return true;
        }

        if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                               NFPROTO_IPV4, dev_net(skb->dev),
                               &srctuple))
                return false;

        hash = nf_conntrack_find_get(dev_net(skb->dev),
                                     &nf_ct_zone_dflt,
                                     &srctuple);
        if (!hash)
                return false;

        ct = nf_ct_tuplehash_to_ctrack(hash);
        src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
        memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
        nf_ct_put(ct);

        return true;
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        spinlock_t *lockp;

        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
                if (*bucket < nf_conntrack_htable_size) {
                        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                        continue;
                                ct = nf_ct_tuplehash_to_ctrack(h);
                                if (iter(ct, data))
                                        goto found;
                        }
                }
                spin_unlock(lockp);
                local_bh_enable();
                cond_resched();
        }

        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock(lockp);
        local_bh_enable();
        return ct;
}

static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
                                  void *data, u32 portid, int report)
{
        unsigned int bucket = 0, sequence;
        struct nf_conn *ct;

        might_sleep();

        for (;;) {
                sequence = read_seqcount_begin(&nf_conntrack_generation);

                while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
                        /* Time to push up daisies... */

                        nf_ct_delete(ct, portid, report);
                        nf_ct_put(ct);
                        cond_resched();
                }

                if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
                        break;
                bucket = 0;
        }
}

struct iter_data {
        int (*iter)(struct nf_conn *i, void *data);
        void *data;
        struct net *net;
};

static int iter_net_only(struct nf_conn *i, void *data)
{
        struct iter_data *d = data;

        if (!net_eq(d->net, nf_ct_net(i)))
                return 0;

        return d->iter(i, d->data);
}

static void
__nf_ct_unconfirmed_destroy(struct net *net)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_node *n;
                struct ct_pcpu *pcpu;

                pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        struct nf_conn *ct;

                        ct = nf_ct_tuplehash_to_ctrack(h);

                        /* we cannot call iter() on unconfirmed list, the
                         * owning cpu can reallocate ct->ext at any time.
                         */
                        set_bit(IPS_DYING_BIT, &ct->status);
                }
                spin_unlock_bh(&pcpu->lock);
                cond_resched();
        }
}

void nf_ct_unconfirmed_destroy(struct net *net)
{
        might_sleep();

        if (atomic_read(&net->ct.count) > 0) {
                __nf_ct_unconfirmed_destroy(net);
                nf_queue_nf_hook_drop(net);
                synchronize_net();
        }
}
EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);

void nf_ct_iterate_cleanup_net(struct net *net,
                               int (*iter)(struct nf_conn *i, void *data),
                               void *data, u32 portid, int report)
{
        struct iter_data d;

        might_sleep();

        if (atomic_read(&net->ct.count) == 0)
                return;

        d.iter = iter;
        d.data = data;
        d.net = net;

        nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
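
/* Editorial note: an illustrative sketch with hypothetical names
 * (example_*), not part of the original file.  A per-netns flush selects
 * entries for deletion by returning non-zero from the callback:
 */
#if 0
static int example_kill_by_l4proto(struct nf_conn *i, void *data)
{
        return nf_ct_protonum(i) == *(u8 *)data;
}

static void example_flush_udp(struct net *net)
{
        u8 proto = IPPROTO_UDP;

        nf_ct_iterate_cleanup_net(net, example_kill_by_l4proto, &proto,
                                  0, 0);        /* no netlink portid/report */
}
#endif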

/**
 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
 * @iter: callback to invoke for each conntrack
 * @data: data to pass to @iter
 *
 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
 * unconfirmed list as dying (so they will not be inserted into
 * main table).
 *
 * Can only be called in module exit path.
 */
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
        struct net *net;

        down_read(&net_rwsem);
        for_each_net(net) {
                if (atomic_read(&net->ct.count) == 0)
                        continue;
                __nf_ct_unconfirmed_destroy(net);
                nf_queue_nf_hook_drop(net);
        }
        up_read(&net_rwsem);

        /* Need to wait for netns cleanup worker to finish, if it is
         * running -- it might have deleted a net namespace from
         * the global list, so our __nf_ct_unconfirmed_destroy() might
         * not have affected all namespaces.
         */
        net_ns_barrier();

        /* a conntrack could have been unlinked from the unconfirmed list
         * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
         * This makes sure it is inserted into the conntrack table.
         */
        synchronize_net();

        nf_ct_iterate_cleanup(iter, data, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
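
/* Editorial note: an illustrative sketch with hypothetical names
 * (example_*), not part of the original file.  A module that stamps state
 * into conntrack entries uses this in its exit path to drop every entry
 * across all namespaces:
 */
#if 0
static int example_kill_everything(struct nf_conn *i, void *data)
{
        return 1;       /* select every entry for deletion */
}

static void __exit example_exit(void)
{
        nf_ct_iterate_destroy(example_kill_everything, NULL);
}
#endif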

static int kill_all(struct nf_conn *i, void *data)
{
        return net_eq(nf_ct_net(i), data);
}

void nf_conntrack_cleanup_start(void)
{
        conntrack_gc_work.exiting = true;
        RCU_INIT_POINTER(ip_ct_attach, NULL);
}

void nf_conntrack_cleanup_end(void)
{
        RCU_INIT_POINTER(nf_ct_hook, NULL);
        cancel_delayed_work_sync(&conntrack_gc_work.dwork);
        kvfree(nf_conntrack_hash);

        nf_conntrack_proto_fini();
        nf_conntrack_seqadj_fini();
        nf_conntrack_labels_fini();
        nf_conntrack_helper_fini();
        nf_conntrack_timeout_fini();
        nf_conntrack_ecache_fini();
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();

        kmem_cache_destroy(nf_conntrack_cachep);
}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
        LIST_HEAD(single);

        list_add(&net->exit_list, &single);
        nf_conntrack_cleanup_net_list(&single);
}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
        int busy;
        struct net *net;

        /*
         * This makes sure all current packets have passed through
         * the netfilter framework.  Roll on, two-stage module
         * delete...
         */
        synchronize_net();
i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_iterate_cleanup(kill_all, net, 0, 0);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
        }
        if (busy) {
                schedule();
                goto i_see_dead_people;
        }

        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_conntrack_proto_pernet_fini(net);
                nf_conntrack_ecache_pernet_fini(net);
                nf_conntrack_expect_pernet_fini(net);
                free_percpu(net->ct.stat);
                free_percpu(net->ct.pcpu_lists);
        }
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;

        if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
                return NULL;

        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));

        hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
                              GFP_KERNEL | __GFP_ZERO);

        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
                        INIT_HLIST_NULLS_HEAD(&hash[i], i);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
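
/* Editorial note: an illustrative sketch, not part of the original file.
 * The helper rounds the requested bucket count up to a whole number of
 * pages worth of buckets and writes the final size back through @sizep:
 */
#if 0
        unsigned int size = 1000;       /* becomes 1024 with 4K pages, 64-bit */
        struct hlist_nulls_head *hash;

        hash = nf_ct_alloc_hashtable(&size, 1); /* 1: nulls-terminated chains */
        if (hash)
                kvfree(hash);
#endif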

int nf_conntrack_hash_resize(unsigned int hashsize)
{
        int i, bucket;
        unsigned int old_size;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, 1);
        if (!hash)
                return -ENOMEM;

        old_size = nf_conntrack_htable_size;
        if (old_size == hashsize) {
                kvfree(hash);
                return 0;
        }

        local_bh_disable();
        nf_conntrack_all_lock();
        write_seqcount_begin(&nf_conntrack_generation);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the locks.
         */

        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
                        h = hlist_nulls_entry(nf_conntrack_hash[i].first,
                                              struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
                        bucket = __hash_conntrack(nf_ct_net(ct),
                                                  &h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_hash = nf_conntrack_hash;

        nf_conntrack_hash = hash;
        nf_conntrack_htable_size = hashsize;

        write_seqcount_end(&nf_conntrack_generation);
        nf_conntrack_all_unlock();
        local_bh_enable();

        synchronize_net();
        kvfree(old_hash);
        return 0;
}

int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{
        unsigned int hashsize;
        int rc;

        if (current->nsproxy->net_ns != &init_net)
                return -EOPNOTSUPP;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_hash)
                return param_set_uint(val, kp);

        rc = kstrtouint(val, 0, &hashsize);
        if (rc)
                return rc;

        return nf_conntrack_hash_resize(hashsize);
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
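
/* Editorial note: an illustrative usage sketch, not part of the original
 * file.  This handler is wired up as the conntrack "hashsize" module
 * parameter, so from the init netns the table can be resized at runtime
 * via /sys/module/nf_conntrack/parameters/hashsize; in-kernel code can
 * call the resize directly:
 */
#if 0
        int err = nf_conntrack_hash_resize(65536);
#endif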

static __always_inline unsigned int total_extension_size(void)
{
        /* remember to add new extensions below */
        BUILD_BUG_ON(NF_CT_EXT_NUM > 9);

        return sizeof(struct nf_ct_ext) +
               sizeof(struct nf_conn_help)
#if IS_ENABLED(CONFIG_NF_NAT)
                + sizeof(struct nf_conn_nat)
#endif
                + sizeof(struct nf_conn_seqadj)
                + sizeof(struct nf_conn_acct)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
                + sizeof(struct nf_conntrack_ecache)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
                + sizeof(struct nf_conn_tstamp)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
                + sizeof(struct nf_conn_timeout)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
                + sizeof(struct nf_conn_labels)
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
                + sizeof(struct nf_conn_synproxy)
#endif
        ;
}

int nf_conntrack_init_start(void)
{
        unsigned long nr_pages = totalram_pages();
        int max_factor = 8;
        int ret = -ENOMEM;
        int i;

        /* struct nf_ct_ext uses u8 to store offsets/size */
        BUILD_BUG_ON(total_extension_size() > 255u);

        seqcount_init(&nf_conntrack_generation);

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_conntrack_locks[i]);

        if (!nf_conntrack_htable_size) {
                /* Idea from tcp.c: use 1/16384 of memory.
                 * On i386: 32MB machine has 512 buckets.
                 * >= 1GB machines have 16384 buckets.
                 * >= 4GB machines have 65536 buckets.
                 */
                nf_conntrack_htable_size
                        = (((nr_pages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
                        nf_conntrack_htable_size = 65536;
                else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }

        nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
        if (!nf_conntrack_hash)
                return -ENOMEM;

        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                NFCT_INFOMASK + 1,
                                                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
        if (!nf_conntrack_cachep)
                goto err_cachep;

        ret = nf_conntrack_expect_init();
        if (ret < 0)
                goto err_expect;

        ret = nf_conntrack_acct_init();
        if (ret < 0)
                goto err_acct;

        ret = nf_conntrack_tstamp_init();
        if (ret < 0)
                goto err_tstamp;

        ret = nf_conntrack_ecache_init();
        if (ret < 0)
                goto err_ecache;

        ret = nf_conntrack_timeout_init();
        if (ret < 0)
                goto err_timeout;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto err_helper;

        ret = nf_conntrack_labels_init();
        if (ret < 0)
                goto err_labels;

        ret = nf_conntrack_seqadj_init();
        if (ret < 0)
                goto err_seqadj;

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_proto;

        conntrack_gc_work_init(&conntrack_gc_work);
        queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);

        return 0;

err_proto:
        nf_conntrack_seqadj_fini();
err_seqadj:
        nf_conntrack_labels_fini();
err_labels:
        nf_conntrack_helper_fini();
err_helper:
        nf_conntrack_timeout_fini();
err_timeout:
        nf_conntrack_ecache_fini();
err_ecache:
        nf_conntrack_tstamp_fini();
err_tstamp:
        nf_conntrack_acct_fini();
err_acct:
        nf_conntrack_expect_fini();
err_expect:
        kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
        kvfree(nf_conntrack_hash);
        return ret;
}

static struct nf_ct_hook nf_conntrack_hook = {
        .update         = nf_conntrack_update,
        .destroy        = destroy_conntrack,
        .get_tuple_skb  = nf_conntrack_get_tuple_skb,
};

void nf_conntrack_init_end(void)
{
        /* For use by REJECT target */
        RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
        RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
#define DYING_NULLS_VAL         ((1<<30)+1)
#define TEMPLATE_NULLS_VAL      ((1<<30)+2)

int nf_conntrack_init_net(struct net *net)
{
        int ret = -ENOMEM;
        int cpu;

        BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
        BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
        atomic_set(&net->ct.count, 0);

        net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
        if (!net->ct.pcpu_lists)
                goto err_stat;

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_init(&pcpu->lock);
                INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
                INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
        }

        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
        if (!net->ct.stat)
                goto err_pcpu_lists;

        ret = nf_conntrack_expect_pernet_init(net);
        if (ret < 0)
                goto err_expect;

        nf_conntrack_acct_pernet_init(net);
        nf_conntrack_tstamp_pernet_init(net);
        nf_conntrack_ecache_pernet_init(net);
        nf_conntrack_helper_pernet_init(net);
        nf_conntrack_proto_pernet_init(net);

        return 0;

err_expect:
        free_percpu(net->ct.stat);
err_pcpu_lists:
        free_percpu(net->ct.pcpu_lists);
err_stat:
        return ret;
}