1 /* Connection state tracking for netfilter. This is separated from,
2 but required by, the NAT layer; it can also be used by an iptables
3 connection tracking match module. */
5 /* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/skbuff.h>
19 #include <linux/proc_fs.h>
20 #include <linux/vmalloc.h>
21 #include <linux/stddef.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/jhash.h>
25 #include <linux/err.h>
26 #include <linux/percpu.h>
27 #include <linux/moduleparam.h>
28 #include <linux/notifier.h>
29 #include <linux/kernel.h>
30 #include <linux/netdevice.h>
31 #include <linux/socket.h>
33 #include <linux/nsproxy.h>
34 #include <linux/rculist_nulls.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_l3proto.h>
38 #include <net/netfilter/nf_conntrack_l4proto.h>
39 #include <net/netfilter/nf_conntrack_expect.h>
40 #include <net/netfilter/nf_conntrack_helper.h>
41 #include <net/netfilter/nf_conntrack_core.h>
42 #include <net/netfilter/nf_conntrack_extend.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_ecache.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_timeout.h>
48 #include <net/netfilter/nf_conntrack_labels.h>
49 #include <net/netfilter/nf_nat.h>
50 #include <net/netfilter/nf_nat_core.h>
51 #include <net/netfilter/nf_nat_helper.h>
53 #define NF_CONNTRACK_VERSION "0.5.0"
55 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
56 enum nf_nat_manip_type manip,
57 const struct nlattr *attr) __read_mostly;
58 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
60 int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
62 enum ip_conntrack_info ctinfo,
63 unsigned int protoff);
64 EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
66 DEFINE_SPINLOCK(nf_conntrack_lock);
67 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
69 unsigned int nf_conntrack_htable_size __read_mostly;
70 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
72 unsigned int nf_conntrack_max __read_mostly;
73 EXPORT_SYMBOL_GPL(nf_conntrack_max);
75 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
76 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
78 unsigned int nf_conntrack_hash_rnd __read_mostly;
79 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
81 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
85 /* The direction must be ignored, so we hash everything up to the
86 * destination ports (which is a multiple of 4) and treat the last
87 * three bytes manually.
89 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
90 return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
91 (((__force __u16)tuple->dst.u.all << 16) |
92 tuple->dst.protonum));
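/* A worked sketch of what the hash covers (assuming the usual tuple
 * layout, typically nine 32-bit words): src -- address, source port
 * union and l3num -- plus dst.u3 span a whole number of words, so
 * jhash2() digests them directly; the leftover three bytes, the
 * 16-bit destination port and the 8-bit protocol number, are folded
 * into the jhash2() initval together with the zone and the per-boot
 * random seed.  dst.dir is deliberately excluded, so a tuple built
 * from a packet (always with dir ORIGINAL) hashes the same as the
 * stored entry regardless of the direction recorded there.
 */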
95 static u32 __hash_bucket(u32 hash, unsigned int size)
97 return ((u64)hash * size) >> 32;
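/* The multiply-shift above maps a 32-bit hash uniformly onto
 * [0, size) without a modulo: e.g. with size = 4096, a hash of
 * 0x80000000 gives ((u64)0x80000000 * 4096) >> 32 = 2048, i.e. the
 * high-order bits of the hash select the bucket.
 */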
100 static u32 hash_bucket(u32 hash, const struct net *net)
102 return __hash_bucket(hash, net->ct.htable_size);
105 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
106 u16 zone, unsigned int size)
108 return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
111 static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
112 const struct nf_conntrack_tuple *tuple)
114 return __hash_conntrack(tuple, zone, net->ct.htable_size);
118 nf_ct_get_tuple(const struct sk_buff *skb,
120 unsigned int dataoff,
123 struct nf_conntrack_tuple *tuple,
124 const struct nf_conntrack_l3proto *l3proto,
125 const struct nf_conntrack_l4proto *l4proto)
127 memset(tuple, 0, sizeof(*tuple));
129 tuple->src.l3num = l3num;
130 if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
133 tuple->dst.protonum = protonum;
134 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
136 return l4proto->pkt_to_tuple(skb, dataoff, tuple);
138 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
140 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
141 u_int16_t l3num, struct nf_conntrack_tuple *tuple)
143 struct nf_conntrack_l3proto *l3proto;
144 struct nf_conntrack_l4proto *l4proto;
145 unsigned int protoff;
151 l3proto = __nf_ct_l3proto_find(l3num);
152 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
153 if (ret != NF_ACCEPT) {
158 l4proto = __nf_ct_l4proto_find(l3num, protonum);
160 ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
166 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
169 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
170 const struct nf_conntrack_tuple *orig,
171 const struct nf_conntrack_l3proto *l3proto,
172 const struct nf_conntrack_l4proto *l4proto)
174 memset(inverse, 0, sizeof(*inverse));
176 inverse->src.l3num = orig->src.l3num;
177 if (l3proto->invert_tuple(inverse, orig) == 0)
180 inverse->dst.dir = !orig->dst.dir;
182 inverse->dst.protonum = orig->dst.protonum;
183 return l4proto->invert_tuple(inverse, orig);
185 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
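/* Inversion sketch (hypothetical addresses, for illustration only):
 * the ORIGINAL tuple of a TCP connection
 *	10.0.0.1:1025 -> 192.0.2.1:80
 * inverts to the tuple that reply packets will carry,
 *	192.0.2.1:80 -> 10.0.0.1:1025
 * with dst.dir flipped; init_conntrack() uses exactly this to derive
 * the REPLY tuplehash from the first packet it sees.
 */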
188 clean_from_lists(struct nf_conn *ct)
190 pr_debug("clean_from_lists(%p)\n", ct);
191 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
192 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
194 /* Destroy all pending expectations */
195 nf_ct_remove_expectations(ct);
199 destroy_conntrack(struct nf_conntrack *nfct)
201 struct nf_conn *ct = (struct nf_conn *)nfct;
202 struct net *net = nf_ct_net(ct);
203 struct nf_conntrack_l4proto *l4proto;
205 pr_debug("destroy_conntrack(%p)\n", ct);
206 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
207 NF_CT_ASSERT(!timer_pending(&ct->timeout));
209 /* To make sure we don't get any weird locking issues here:
210 * destroy_conntrack() MUST NOT be called with a write lock
211 * to nf_conntrack_lock!!! -HW */
213 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
214 if (l4proto && l4proto->destroy)
215 l4proto->destroy(ct);
219 spin_lock_bh(&nf_conntrack_lock);
220 /* Expectations will have been removed in clean_from_lists,
221 * except TFTP can create an expectation on the first packet,
222 * before connection is in the list, so we need to clean here, too. */
224 nf_ct_remove_expectations(ct);
226 /* We overload first tuple to link into unconfirmed or dying list.*/
227 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
228 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
230 NF_CT_STAT_INC(net, delete);
231 spin_unlock_bh(&nf_conntrack_lock);
234 nf_ct_put(ct->master);
236 pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
237 nf_conntrack_free(ct);
240 void nf_ct_delete_from_lists(struct nf_conn *ct)
242 struct net *net = nf_ct_net(ct);
244 nf_ct_helper_destroy(ct);
245 spin_lock_bh(&nf_conntrack_lock);
246 /* Inside lock so preempt is disabled on module removal path.
247 * Otherwise we can get spurious warnings. */
248 NF_CT_STAT_INC(net, delete_list);
249 clean_from_lists(ct);
250 /* add this conntrack to the dying list */
251 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
253 spin_unlock_bh(&nf_conntrack_lock);
255 EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
257 static void death_by_event(unsigned long ul_conntrack)
259 struct nf_conn *ct = (void *)ul_conntrack;
260 struct net *net = nf_ct_net(ct);
261 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
263 BUG_ON(ecache == NULL);
265 if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
266 /* bad luck, let's retry again */
267 ecache->timeout.expires = jiffies +
268 (random32() % net->ct.sysctl_events_retry_timeout);
269 add_timer(&ecache->timeout);
272 /* we've got the event delivered, now it's dying */
273 set_bit(IPS_DYING_BIT, &ct->status);
277 void nf_ct_dying_timeout(struct nf_conn *ct)
279 struct net *net = nf_ct_net(ct);
280 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
282 BUG_ON(ecache == NULL);
284 /* set a new timer to retry event delivery */
285 setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
286 ecache->timeout.expires = jiffies +
287 (random32() % net->ct.sysctl_events_retry_timeout);
288 add_timer(&ecache->timeout);
290 EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
292 static void death_by_timeout(unsigned long ul_conntrack)
294 struct nf_conn *ct = (void *)ul_conntrack;
295 struct nf_conn_tstamp *tstamp;
297 tstamp = nf_conn_tstamp_find(ct);
298 if (tstamp && tstamp->stop == 0)
299 tstamp->stop = ktime_to_ns(ktime_get_real());
301 if (!test_bit(IPS_DYING_BIT, &ct->status) &&
302 unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
303 /* destroy event was not delivered */
304 nf_ct_delete_from_lists(ct);
305 nf_ct_dying_timeout(ct);
308 set_bit(IPS_DYING_BIT, &ct->status);
309 nf_ct_delete_from_lists(ct);
315 * - Caller must take a reference on returned object
316 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
317 * OR
318 * - Caller must lock nf_conntrack_lock before calling this function
320 static struct nf_conntrack_tuple_hash *
321 ____nf_conntrack_find(struct net *net, u16 zone,
322 const struct nf_conntrack_tuple *tuple, u32 hash)
324 struct nf_conntrack_tuple_hash *h;
325 struct hlist_nulls_node *n;
326 unsigned int bucket = hash_bucket(hash, net);
328 /* Disable BHs the entire time since we normally need to disable them
329 * at least once for the stats anyway.
333 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
334 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
335 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
336 NF_CT_STAT_INC(net, found);
340 NF_CT_STAT_INC(net, searched);
343 * if the nulls value we got at the end of this lookup is
344 * not the expected one, we must restart lookup.
345 * We probably met an item that was moved to another chain.
347 if (get_nulls_value(n) != bucket) {
348 NF_CT_STAT_INC(net, search_restart);
356 struct nf_conntrack_tuple_hash *
357 __nf_conntrack_find(struct net *net, u16 zone,
358 const struct nf_conntrack_tuple *tuple)
360 return ____nf_conntrack_find(net, zone, tuple,
361 hash_conntrack_raw(tuple, zone));
363 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
365 /* Find a connection corresponding to a tuple. */
366 static struct nf_conntrack_tuple_hash *
367 __nf_conntrack_find_get(struct net *net, u16 zone,
368 const struct nf_conntrack_tuple *tuple, u32 hash)
370 struct nf_conntrack_tuple_hash *h;
375 h = ____nf_conntrack_find(net, zone, tuple, hash);
377 ct = nf_ct_tuplehash_to_ctrack(h);
378 if (unlikely(nf_ct_is_dying(ct) ||
379 !atomic_inc_not_zero(&ct->ct_general.use)))
382 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
383 nf_ct_zone(ct) != zone)) {
394 struct nf_conntrack_tuple_hash *
395 nf_conntrack_find_get(struct net *net, u16 zone,
396 const struct nf_conntrack_tuple *tuple)
398 return __nf_conntrack_find_get(net, zone, tuple,
399 hash_conntrack_raw(tuple, zone));
401 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
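/* A minimal caller sketch (example_tuple_is_tracked() is a
 * hypothetical helper, not used elsewhere): nf_conntrack_find_get()
 * returns with a reference held, so the caller only has to drop it.
 */
static inline bool example_tuple_is_tracked(struct net *net, u16 zone,
					    const struct nf_conntrack_tuple *t)
{
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(net, zone, t);
	if (h == NULL)
		return false;
	/* use the entry here, then release the reference */
	nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
	return true;
}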
403 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
405 unsigned int repl_hash)
407 struct net *net = nf_ct_net(ct);
409 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
410 &net->ct.hash[hash]);
411 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
412 &net->ct.hash[repl_hash]);
416 nf_conntrack_hash_check_insert(struct nf_conn *ct)
418 struct net *net = nf_ct_net(ct);
419 unsigned int hash, repl_hash;
420 struct nf_conntrack_tuple_hash *h;
421 struct hlist_nulls_node *n;
424 zone = nf_ct_zone(ct);
425 hash = hash_conntrack(net, zone,
426 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
427 repl_hash = hash_conntrack(net, zone,
428 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
430 spin_lock_bh(&nf_conntrack_lock);
432 /* See if there's one in the list already, including reverse */
433 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
434 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
436 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
438 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
439 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
441 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
444 add_timer(&ct->timeout);
445 nf_conntrack_get(&ct->ct_general);
446 __nf_conntrack_hash_insert(ct, hash, repl_hash);
447 NF_CT_STAT_INC(net, insert);
448 spin_unlock_bh(&nf_conntrack_lock);
453 NF_CT_STAT_INC(net, insert_failed);
454 spin_unlock_bh(&nf_conntrack_lock);
457 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
459 /* Confirm a connection given skb; places it in hash table */
461 __nf_conntrack_confirm(struct sk_buff *skb)
463 unsigned int hash, repl_hash;
464 struct nf_conntrack_tuple_hash *h;
466 struct nf_conn_help *help;
467 struct nf_conn_tstamp *tstamp;
468 struct hlist_nulls_node *n;
469 enum ip_conntrack_info ctinfo;
473 ct = nf_ct_get(skb, &ctinfo);
476 /* ipt_REJECT uses nf_conntrack_attach to attach related
477 ICMP/TCP RST packets in other direction. Actual packet
478 which created connection will be IP_CT_NEW or for an
479 expected connection, IP_CT_RELATED. */
480 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
483 zone = nf_ct_zone(ct);
484 /* reuse the hash saved before */
485 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
486 hash = hash_bucket(hash, net);
487 repl_hash = hash_conntrack(net, zone,
488 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
490 /* We're not in hash table, and we refuse to set up related
491 connections for unconfirmed conns. But packet copies and
492 REJECT will give spurious warnings here. */
493 /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
495 /* No external references means no one else could have
496 confirmed us. */
497 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
498 pr_debug("Confirming conntrack %p\n", ct);
500 spin_lock_bh(&nf_conntrack_lock);
502 /* We have to check the DYING flag inside the lock to prevent
503 a race against nf_ct_get_next_corpse() possibly called from
504 user context, else we insert an already 'dead' hash, blocking
505 further use of that particular connection -JM */
507 if (unlikely(nf_ct_is_dying(ct))) {
508 spin_unlock_bh(&nf_conntrack_lock);
512 /* See if there's one in the list already, including reverse:
513 NAT could have grabbed it without realizing, since we're
514 not in the hash. If there is, we lost race. */
515 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
516 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
518 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
520 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
521 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
523 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
526 /* Remove from unconfirmed list */
527 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
529 /* Timer relative to confirmation time, not original
530 setting time, otherwise we'd get timer wrap in
531 weird delay cases. */
532 ct->timeout.expires += jiffies;
533 add_timer(&ct->timeout);
534 atomic_inc(&ct->ct_general.use);
535 ct->status |= IPS_CONFIRMED;
537 /* set conntrack timestamp, if enabled. */
538 tstamp = nf_conn_tstamp_find(ct);
540 if (skb->tstamp.tv64 == 0)
541 __net_timestamp(skb);
543 tstamp->start = ktime_to_ns(skb->tstamp);
545 /* Since the lookup is lockless, hash insertion must be done after
546 * starting the timer and setting the CONFIRMED bit. The RCU barriers
547 * guarantee that no other CPU can find the conntrack before the above
548 * stores are visible.
550 __nf_conntrack_hash_insert(ct, hash, repl_hash);
551 NF_CT_STAT_INC(net, insert);
552 spin_unlock_bh(&nf_conntrack_lock);
554 help = nfct_help(ct);
555 if (help && help->helper)
556 nf_conntrack_event_cache(IPCT_HELPER, ct);
558 nf_conntrack_event_cache(master_ct(ct) ?
559 IPCT_RELATED : IPCT_NEW, ct);
563 NF_CT_STAT_INC(net, insert_failed);
564 spin_unlock_bh(&nf_conntrack_lock);
567 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
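/* Note on the "hash saved before" trick used above: an unconfirmed
 * conntrack is not yet hashed, so the REPLY tuplehash's hnnode.pprev
 * is unused; __nf_conntrack_alloc() stashes the raw tuple hash there
 * and __nf_conntrack_confirm() reads it back rather than recomputing
 * the ORIGINAL-direction hash for every confirmed packet.
 */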
569 /* Returns true if a connection corresponds to the tuple (required
570 for NAT). */
572 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
573 const struct nf_conn *ignored_conntrack)
575 struct net *net = nf_ct_net(ignored_conntrack);
576 struct nf_conntrack_tuple_hash *h;
577 struct hlist_nulls_node *n;
579 u16 zone = nf_ct_zone(ignored_conntrack);
580 unsigned int hash = hash_conntrack(net, zone, tuple);
582 /* Disable BHs the entire time since we need to disable them at
583 * least once for the stats anyway.
586 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
587 ct = nf_ct_tuplehash_to_ctrack(h);
588 if (ct != ignored_conntrack &&
589 nf_ct_tuple_equal(tuple, &h->tuple) &&
590 nf_ct_zone(ct) == zone) {
591 NF_CT_STAT_INC(net, found);
592 rcu_read_unlock_bh();
595 NF_CT_STAT_INC(net, searched);
597 rcu_read_unlock_bh();
601 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
603 #define NF_CT_EVICTION_RANGE 8
605 /* There's a small race here where we may free a just-assured
606 connection. Too bad: we're in trouble anyway. */
607 static noinline int early_drop(struct net *net, unsigned int hash)
609 /* Use oldest entry, which is roughly LRU */
610 struct nf_conntrack_tuple_hash *h;
611 struct nf_conn *ct = NULL, *tmp;
612 struct hlist_nulls_node *n;
613 unsigned int i, cnt = 0;
617 for (i = 0; i < net->ct.htable_size; i++) {
618 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
620 tmp = nf_ct_tuplehash_to_ctrack(h);
621 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
627 if (likely(!nf_ct_is_dying(ct) &&
628 atomic_inc_not_zero(&ct->ct_general.use)))
634 if (cnt >= NF_CT_EVICTION_RANGE)
637 hash = (hash + 1) % net->ct.htable_size;
644 if (del_timer(&ct->timeout)) {
645 death_by_timeout((unsigned long)ct);
646 /* Check if we indeed killed this entry. Reliable event
647 delivery may have inserted it into the dying list. */
648 if (test_bit(IPS_DYING_BIT, &ct->status)) {
650 NF_CT_STAT_INC_ATOMIC(net, early_drop);
657 void init_nf_conntrack_hash_rnd(void)
662 * Why not initialize nf_conntrack_hash_rnd in an "init()" function?
663 * Because there isn't enough entropy while the system is initializing,
664 * so we initialize it as late as possible.
667 get_random_bytes(&rand, sizeof(rand));
669 cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
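/* The cmpxchg() above only installs the value while
 * nf_conntrack_hash_rnd is still zero: several CPUs may race into
 * this lazy initialisation from __nf_conntrack_alloc(), and the
 * compare-and-exchange guarantees that exactly one seed wins and is
 * then used by every subsequent hash computation.
 */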
672 static struct nf_conn *
673 __nf_conntrack_alloc(struct net *net, u16 zone,
674 const struct nf_conntrack_tuple *orig,
675 const struct nf_conntrack_tuple *repl,
680 if (unlikely(!nf_conntrack_hash_rnd)) {
681 init_nf_conntrack_hash_rnd();
682 /* recompute the hash as nf_conntrack_hash_rnd is initialized */
683 hash = hash_conntrack_raw(orig, zone);
686 /* We don't want any race condition at early drop stage */
687 atomic_inc(&net->ct.count);
689 if (nf_conntrack_max &&
690 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
691 if (!early_drop(net, hash_bucket(hash, net))) {
692 atomic_dec(&net->ct.count);
693 net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
694 return ERR_PTR(-ENOMEM);
699 * Do not use kmem_cache_zalloc(), as this cache uses
700 * SLAB_DESTROY_BY_RCU.
702 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
704 atomic_dec(&net->ct.count);
705 return ERR_PTR(-ENOMEM);
708 * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
709 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next untouched.
711 memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
712 offsetof(struct nf_conn, proto) -
713 offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
714 spin_lock_init(&ct->lock);
715 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
716 ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
717 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
718 /* save hash for reusing when confirming */
719 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
720 /* Don't set timer yet: wait for confirmation */
721 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
722 write_pnet(&ct->ct_net, net);
723 #ifdef CONFIG_NF_CONNTRACK_ZONES
725 struct nf_conntrack_zone *nf_ct_zone;
727 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
730 nf_ct_zone->id = zone;
734 * changes to lookup keys must be done before setting refcnt to 1
737 atomic_set(&ct->ct_general.use, 1);
740 #ifdef CONFIG_NF_CONNTRACK_ZONES
742 atomic_dec(&net->ct.count);
743 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
744 return ERR_PTR(-ENOMEM);
748 struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
749 const struct nf_conntrack_tuple *orig,
750 const struct nf_conntrack_tuple *repl,
753 return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
755 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
757 void nf_conntrack_free(struct nf_conn *ct)
759 struct net *net = nf_ct_net(ct);
761 nf_ct_ext_destroy(ct);
762 atomic_dec(&net->ct.count);
764 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
766 EXPORT_SYMBOL_GPL(nf_conntrack_free);
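/* Why kmem_cache_zalloc() is avoided in __nf_conntrack_alloc(): the
 * cache is created with SLAB_DESTROY_BY_RCU, so memory released by
 * nf_conntrack_free() can be recycled into a new conntrack before
 * concurrent RCU lookups are done with it.  Lockless readers cope by
 * taking a reference via atomic_inc_not_zero() and rechecking tuple
 * and zone afterwards (see __nf_conntrack_find_get()), and the
 * allocator clears only the middle of the object so the tuplehash
 * list linkage stays valid for readers still walking the old chains.
 */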
769 /* Allocate a new conntrack: we return -ENOMEM if classification
770 failed due to stress. Otherwise it really is unclassifiable. */
771 static struct nf_conntrack_tuple_hash *
772 init_conntrack(struct net *net, struct nf_conn *tmpl,
773 const struct nf_conntrack_tuple *tuple,
774 struct nf_conntrack_l3proto *l3proto,
775 struct nf_conntrack_l4proto *l4proto,
777 unsigned int dataoff, u32 hash)
780 struct nf_conn_help *help;
781 struct nf_conntrack_tuple repl_tuple;
782 struct nf_conntrack_ecache *ecache;
783 struct nf_conntrack_expect *exp;
784 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
785 struct nf_conn_timeout *timeout_ext;
786 unsigned int *timeouts;
788 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
789 pr_debug("Can't invert tuple.\n");
793 ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
796 return (struct nf_conntrack_tuple_hash *)ct;
798 timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
800 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
802 timeouts = l4proto->get_timeouts(net);
804 if (!l4proto->new(ct, skb, dataoff, timeouts)) {
805 nf_conntrack_free(ct);
806 pr_debug("init conntrack: can't track with proto module\n");
811 nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
813 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
814 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
815 nf_ct_labels_ext_add(ct);
817 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
818 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
819 ecache ? ecache->expmask : 0,
822 spin_lock_bh(&nf_conntrack_lock);
823 exp = nf_ct_find_expectation(net, zone, tuple);
825 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
827 /* Welcome, Mr. Bond. We've been expecting you... */
828 __set_bit(IPS_EXPECTED_BIT, &ct->status);
829 ct->master = exp->master;
831 help = nf_ct_helper_ext_add(ct, exp->helper,
834 rcu_assign_pointer(help->helper, exp->helper);
837 #ifdef CONFIG_NF_CONNTRACK_MARK
838 ct->mark = exp->master->mark;
840 #ifdef CONFIG_NF_CONNTRACK_SECMARK
841 ct->secmark = exp->master->secmark;
843 nf_conntrack_get(&ct->master->ct_general);
844 NF_CT_STAT_INC(net, expect_new);
846 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
847 NF_CT_STAT_INC(net, new);
850 /* Overload tuple linked list to put us in unconfirmed list. */
851 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
852 &net->ct.unconfirmed);
854 spin_unlock_bh(&nf_conntrack_lock);
858 exp->expectfn(ct, exp);
859 nf_ct_expect_put(exp);
862 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
865 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
866 static inline struct nf_conn *
867 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
869 unsigned int dataoff,
872 struct nf_conntrack_l3proto *l3proto,
873 struct nf_conntrack_l4proto *l4proto,
875 enum ip_conntrack_info *ctinfo)
877 struct nf_conntrack_tuple tuple;
878 struct nf_conntrack_tuple_hash *h;
880 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
883 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
884 dataoff, l3num, protonum, &tuple, l3proto,
886 pr_debug("resolve_normal_ct: Can't get tuple\n");
890 /* look for tuple match */
891 hash = hash_conntrack_raw(&tuple, zone);
892 h = __nf_conntrack_find_get(net, zone, &tuple, hash);
894 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
901 ct = nf_ct_tuplehash_to_ctrack(h);
903 /* It exists; we have (non-exclusive) reference. */
904 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
905 *ctinfo = IP_CT_ESTABLISHED_REPLY;
906 /* Set the reply bit if this packet is OK */
909 /* Once we've had two way comms, always ESTABLISHED. */
910 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
911 pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
912 *ctinfo = IP_CT_ESTABLISHED;
913 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
914 pr_debug("nf_conntrack_in: related packet for %p\n",
916 *ctinfo = IP_CT_RELATED;
918 pr_debug("nf_conntrack_in: new packet for %p\n", ct);
923 skb->nfct = &ct->ct_general;
924 skb->nfctinfo = *ctinfo;
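/* Summary of the ctinfo chosen above: packets matching the REPLY
 * tuple always get IP_CT_ESTABLISHED_REPLY; in the ORIGINAL direction
 * it is IP_CT_ESTABLISHED once a reply has been seen, IP_CT_RELATED
 * for expected connections, and IP_CT_NEW otherwise.
 */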
929 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
932 struct nf_conn *ct, *tmpl = NULL;
933 enum ip_conntrack_info ctinfo;
934 struct nf_conntrack_l3proto *l3proto;
935 struct nf_conntrack_l4proto *l4proto;
936 unsigned int *timeouts;
937 unsigned int dataoff;
943 /* Previously seen (loopback or untracked)? Ignore. */
944 tmpl = (struct nf_conn *)skb->nfct;
945 if (!nf_ct_is_template(tmpl)) {
946 NF_CT_STAT_INC_ATOMIC(net, ignore);
952 /* rcu_read_lock()ed by nf_hook_slow */
953 l3proto = __nf_ct_l3proto_find(pf);
954 ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
955 &dataoff, &protonum);
957 pr_debug("not prepared to track yet or error occurred\n");
958 NF_CT_STAT_INC_ATOMIC(net, error);
959 NF_CT_STAT_INC_ATOMIC(net, invalid);
964 l4proto = __nf_ct_l4proto_find(pf, protonum);
966 /* It may be a special packet, error, unclean...
967 * inverse of the return code tells the netfilter
968 * core what to do with the packet. */
969 if (l4proto->error != NULL) {
970 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
973 NF_CT_STAT_INC_ATOMIC(net, error);
974 NF_CT_STAT_INC_ATOMIC(net, invalid);
978 /* ICMP[v6] protocol trackers may assign one conntrack. */
983 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
984 l3proto, l4proto, &set_reply, &ctinfo);
986 /* Not valid part of a connection */
987 NF_CT_STAT_INC_ATOMIC(net, invalid);
993 /* Too stressed to deal. */
994 NF_CT_STAT_INC_ATOMIC(net, drop);
999 NF_CT_ASSERT(skb->nfct);
1001 /* Decide what timeout policy we want to apply to this flow. */
1002 timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1004 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1006 /* Invalid: inverse of the return code tells
1007 * the netfilter core what to do */
1008 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1009 nf_conntrack_put(skb->nfct);
1011 NF_CT_STAT_INC_ATOMIC(net, invalid);
1012 if (ret == -NF_DROP)
1013 NF_CT_STAT_INC_ATOMIC(net, drop);
1018 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1019 nf_conntrack_event_cache(IPCT_REPLY, ct);
1022 /* Special case: we have to repeat this hook, assign the
1023 * template again to this packet. We assume that this packet
1024 * has no conntrack assigned. This is used by nf_ct_tcp. */
1025 if (ret == NF_REPEAT)
1026 skb->nfct = (struct nf_conntrack *)tmpl;
1033 EXPORT_SYMBOL_GPL(nf_conntrack_in);
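/* A minimal sketch of how a per-family netfilter hook feeds packets
 * into the tracker.  example_ipv4_conntrack_in() is hypothetical and
 * only illustrative; the real PRE_ROUTING/LOCAL_OUT entry points live
 * in the IPv4/IPv6 l3proto modules.
 */
static inline unsigned int
example_ipv4_conntrack_in(unsigned int hooknum, struct sk_buff *skb,
			  const struct net_device *in,
			  const struct net_device *out,
			  int (*okfn)(struct sk_buff *))
{
	/* PRE_ROUTING always has a valid input device */
	return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
}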
1035 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1036 const struct nf_conntrack_tuple *orig)
1041 ret = nf_ct_invert_tuple(inverse, orig,
1042 __nf_ct_l3proto_find(orig->src.l3num),
1043 __nf_ct_l4proto_find(orig->src.l3num,
1044 orig->dst.protonum));
1048 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1050 /* Alter reply tuple (maybe alter helper). This is for NAT, and is
1051 implicitly racy: see __nf_conntrack_confirm */
1052 void nf_conntrack_alter_reply(struct nf_conn *ct,
1053 const struct nf_conntrack_tuple *newreply)
1055 struct nf_conn_help *help = nfct_help(ct);
1057 /* Should be unconfirmed, so not in hash table yet */
1058 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1060 pr_debug("Altering reply tuple of %p to ", ct);
1061 nf_ct_dump_tuple(newreply);
1063 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1064 if (ct->master || (help && !hlist_empty(&help->expectations)))
1068 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1071 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1073 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1074 void __nf_ct_refresh_acct(struct nf_conn *ct,
1075 enum ip_conntrack_info ctinfo,
1076 const struct sk_buff *skb,
1077 unsigned long extra_jiffies,
1080 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1083 /* Only update if this is not a fixed timeout */
1084 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1087 /* If not in hash table, timer will not be active yet */
1088 if (!nf_ct_is_confirmed(ct)) {
1089 ct->timeout.expires = extra_jiffies;
1091 unsigned long newtime = jiffies + extra_jiffies;
1093 /* Only update the timeout if the new timeout is at least
1094 HZ jiffies from the old timeout. Need del_timer for race
1095 avoidance (may already be dying). */
1096 if (newtime - ct->timeout.expires >= HZ)
1097 mod_timer_pending(&ct->timeout, newtime);
1102 struct nf_conn_counter *acct;
1104 acct = nf_conn_acct_find(ct);
1106 atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1107 atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
1111 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
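/* Refresh example (hypothetical numbers): for a confirmed conntrack,
 * extra_jiffies = 5 * HZ pushes the expiry out to jiffies + 5 * HZ,
 * but only when that differs from the current expiry by at least HZ;
 * sub-second refreshes are skipped so a busy flow does not hit
 * mod_timer_pending() on every packet.  For an unconfirmed conntrack
 * the relative value is merely stored in ct->timeout.expires until
 * __nf_conntrack_confirm() adds jiffies and starts the timer.
 */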
1113 bool __nf_ct_kill_acct(struct nf_conn *ct,
1114 enum ip_conntrack_info ctinfo,
1115 const struct sk_buff *skb,
1119 struct nf_conn_counter *acct;
1121 acct = nf_conn_acct_find(ct);
1123 atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
1124 atomic64_add(skb->len - skb_network_offset(skb),
1125 &acct[CTINFO2DIR(ctinfo)].bytes);
1129 if (del_timer(&ct->timeout)) {
1130 ct->timeout.function((unsigned long)ct);
1135 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1137 #ifdef CONFIG_NF_CONNTRACK_ZONES
1138 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1139 .len = sizeof(struct nf_conntrack_zone),
1140 .align = __alignof__(struct nf_conntrack_zone),
1141 .id = NF_CT_EXT_ZONE,
1145 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1147 #include <linux/netfilter/nfnetlink.h>
1148 #include <linux/netfilter/nfnetlink_conntrack.h>
1149 #include <linux/mutex.h>
1151 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1152 * in ip_conntrack_core, since we don't want the protocols to autoload
1153 * or depend on ctnetlink */
1154 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1155 const struct nf_conntrack_tuple *tuple)
1157 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1158 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1159 goto nla_put_failure;
1165 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1167 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1168 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
1169 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
1171 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1173 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1174 struct nf_conntrack_tuple *t)
1176 if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1179 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1180 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1184 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1186 int nf_ct_port_nlattr_tuple_size(void)
1188 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1190 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
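/* Port-based trackers plug these helpers straight into their
 * nf_conntrack_l4proto definition, roughly as follows (sketch based
 * on how the TCP and UDP trackers wire them up):
 *
 *	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 *	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 *	.nla_policy		= nf_ct_port_nla_policy,
 *	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 */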
1193 /* Used by ipt_REJECT and ip6t_REJECT. */
1194 static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1197 enum ip_conntrack_info ctinfo;
1199 /* This ICMP is in reverse direction to the packet which caused it */
1200 ct = nf_ct_get(skb, &ctinfo);
1201 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1202 ctinfo = IP_CT_RELATED_REPLY;
1204 ctinfo = IP_CT_RELATED;
1206 /* Attach to new skbuff, and increment count */
1207 nskb->nfct = &ct->ct_general;
1208 nskb->nfctinfo = ctinfo;
1209 nf_conntrack_get(nskb->nfct);
1212 /* Bring out ya dead! */
1213 static struct nf_conn *
1214 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1215 void *data, unsigned int *bucket)
1217 struct nf_conntrack_tuple_hash *h;
1219 struct hlist_nulls_node *n;
1221 spin_lock_bh(&nf_conntrack_lock);
1222 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1223 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1224 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1226 ct = nf_ct_tuplehash_to_ctrack(h);
1231 hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
1232 ct = nf_ct_tuplehash_to_ctrack(h);
1234 set_bit(IPS_DYING_BIT, &ct->status);
1236 spin_unlock_bh(&nf_conntrack_lock);
1239 atomic_inc(&ct->ct_general.use);
1240 spin_unlock_bh(&nf_conntrack_lock);
1244 void nf_ct_iterate_cleanup(struct net *net,
1245 int (*iter)(struct nf_conn *i, void *data),
1249 unsigned int bucket = 0;
1251 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1252 /* Time to push up daisies... */
1253 if (del_timer(&ct->timeout))
1254 death_by_timeout((unsigned long)ct);
1255 /* ... else the timer will get him soon. */
1260 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1262 struct __nf_ct_flush_report {
1267 static int kill_report(struct nf_conn *i, void *data)
1269 struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1270 struct nf_conn_tstamp *tstamp;
1272 tstamp = nf_conn_tstamp_find(i);
1273 if (tstamp && tstamp->stop == 0)
1274 tstamp->stop = ktime_to_ns(ktime_get_real());
1276 /* If we fail to deliver the event, death_by_timeout() will retry */
1277 if (nf_conntrack_event_report(IPCT_DESTROY, i,
1278 fr->pid, fr->report) < 0)
1281 /* Avoid the delivery of the destroy event in death_by_timeout(). */
1282 set_bit(IPS_DYING_BIT, &i->status);
1286 static int kill_all(struct nf_conn *i, void *data)
1291 void nf_ct_free_hashtable(void *hash, unsigned int size)
1293 if (is_vmalloc_addr(hash))
1296 free_pages((unsigned long)hash,
1297 get_order(sizeof(struct hlist_head) * size));
1299 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1301 void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
1303 struct __nf_ct_flush_report fr = {
1307 nf_ct_iterate_cleanup(net, kill_report, &fr);
1309 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
1311 static void nf_ct_release_dying_list(struct net *net)
1313 struct nf_conntrack_tuple_hash *h;
1315 struct hlist_nulls_node *n;
1317 spin_lock_bh(&nf_conntrack_lock);
1318 hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
1319 ct = nf_ct_tuplehash_to_ctrack(h);
1320 /* never fails to remove them, no listeners at this point */
1323 spin_unlock_bh(&nf_conntrack_lock);
1326 static int untrack_refs(void)
1330 for_each_possible_cpu(cpu) {
1331 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1333 cnt += atomic_read(&ct->ct_general.use) - 1;
1338 void nf_conntrack_cleanup_start(void)
1340 RCU_INIT_POINTER(ip_ct_attach, NULL);
1343 void nf_conntrack_cleanup_end(void)
1345 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1346 while (untrack_refs() > 0)
1349 #ifdef CONFIG_NF_CONNTRACK_ZONES
1350 nf_ct_extend_unregister(&nf_ct_zone_extend);
1352 nf_conntrack_proto_fini();
1353 nf_conntrack_labels_fini();
1354 nf_conntrack_helper_fini();
1355 nf_conntrack_timeout_fini();
1356 nf_conntrack_ecache_fini();
1357 nf_conntrack_tstamp_fini();
1358 nf_conntrack_acct_fini();
1359 nf_conntrack_expect_fini();
1363 * Mishearing the voices in his head, our hero wonders how he's
1364 * supposed to kill the mall.
1366 void nf_conntrack_cleanup_net(struct net *net)
1370 list_add(&net->exit_list, &single);
1371 nf_conntrack_cleanup_net_list(&single);
1374 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1380 * This makes sure all current packets have passed through
1381 * netfilter framework. Roll on, two-stage module delete... */
i_see_dead_people:
1387 list_for_each_entry(net, net_exit_list, exit_list) {
1388 nf_ct_iterate_cleanup(net, kill_all, NULL);
1389 nf_ct_release_dying_list(net);
1390 if (atomic_read(&net->ct.count) != 0)
1395 goto i_see_dead_people;
1398 list_for_each_entry(net, net_exit_list, exit_list) {
1399 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1400 nf_conntrack_proto_pernet_fini(net);
1401 nf_conntrack_helper_pernet_fini(net);
1402 nf_conntrack_ecache_pernet_fini(net);
1403 nf_conntrack_tstamp_pernet_fini(net);
1404 nf_conntrack_acct_pernet_fini(net);
1405 nf_conntrack_expect_pernet_fini(net);
1406 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1407 kfree(net->ct.slabname);
1408 free_percpu(net->ct.stat);
1412 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1414 struct hlist_nulls_head *hash;
1415 unsigned int nr_slots, i;
1418 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1419 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1420 sz = nr_slots * sizeof(struct hlist_nulls_head);
1421 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1424 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1429 for (i = 0; i < nr_slots; i++)
1430 INIT_HLIST_NULLS_HEAD(&hash[i], i);
1434 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
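/* Sizing example for the allocator above (assuming 4 KiB pages and an
 * 8-byte struct hlist_nulls_head): PAGE_SIZE / sizeof(head) is 512,
 * so a request for 1000 buckets is rounded up to 1024 slots spread
 * over two pages, and every bucket starts as an empty nulls list
 * whose nulls value encodes its own index -- the value that
 * ____nf_conntrack_find() compares against the bucket number to
 * detect that an entry moved chains during a lockless lookup.
 */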
1436 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1439 unsigned int hashsize, old_size;
1440 struct hlist_nulls_head *hash, *old_hash;
1441 struct nf_conntrack_tuple_hash *h;
1444 if (current->nsproxy->net_ns != &init_net)
1447 /* On boot, we can set this without any fancy locking. */
1448 if (!nf_conntrack_htable_size)
1449 return param_set_uint(val, kp);
1451 rc = kstrtouint(val, 0, &hashsize);
1457 hash = nf_ct_alloc_hashtable(&hashsize, 1);
1461 /* Lookups in the old hash might happen in parallel, which means we
1462 * might get false negatives during connection lookup. New connections
1463 * created because of a false negative won't make it into the hash
1464 * though since that required taking the lock.
1466 spin_lock_bh(&nf_conntrack_lock);
1467 for (i = 0; i < init_net.ct.htable_size; i++) {
1468 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1469 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1470 struct nf_conntrack_tuple_hash, hnnode);
1471 ct = nf_ct_tuplehash_to_ctrack(h);
1472 hlist_nulls_del_rcu(&h->hnnode);
1473 bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1475 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1478 old_size = init_net.ct.htable_size;
1479 old_hash = init_net.ct.hash;
1481 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1482 init_net.ct.hash = hash;
1483 spin_unlock_bh(&nf_conntrack_lock);
1485 nf_ct_free_hashtable(old_hash, old_size);
1488 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1490 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1491 &nf_conntrack_htable_size, 0600);
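/* With the 0600 parameter above, the table can be resized at runtime
 * from the initial network namespace, e.g.:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands in nf_conntrack_set_hashsize() and rehashes every
 * existing entry into the new table under nf_conntrack_lock.
 */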
1493 void nf_ct_untracked_status_or(unsigned long bits)
1497 for_each_possible_cpu(cpu)
1498 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1500 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1502 int nf_conntrack_init_start(void)
1507 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1508 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
1509 if (!nf_conntrack_htable_size) {
1510 nf_conntrack_htable_size
1511 = (((totalram_pages << PAGE_SHIFT) / 16384)
1512 / sizeof(struct hlist_head));
1513 if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1514 nf_conntrack_htable_size = 16384;
1515 if (nf_conntrack_htable_size < 32)
1516 nf_conntrack_htable_size = 32;
1518 /* Use a max. factor of four by default to get the same max as
1519 * with the old struct list_heads. When a table size is given
1520 * we use the old value of 8 to avoid reducing the max.
1521 * entries. */
1524 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
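/* Worked example of the sizing above (hypothetical 64-bit machine,
 * 4 KiB pages, 4 GiB of RAM): memory / 16384 / sizeof(struct
 * hlist_head) would yield 32768 buckets, but machines with more than
 * 1 GiB are clamped to 16384 buckets, and with the default factor of
 * four that allows 65536 tracked connections.
 */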
1526 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1527 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1530 ret = nf_conntrack_expect_init();
1534 ret = nf_conntrack_acct_init();
1538 ret = nf_conntrack_tstamp_init();
1542 ret = nf_conntrack_ecache_init();
1546 ret = nf_conntrack_timeout_init();
1550 ret = nf_conntrack_helper_init();
1554 ret = nf_conntrack_labels_init();
1558 #ifdef CONFIG_NF_CONNTRACK_ZONES
1559 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1563 ret = nf_conntrack_proto_init();
1567 /* Set up fake conntrack: to never be deleted, not in any hashes */
1568 for_each_possible_cpu(cpu) {
1569 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1570 write_pnet(&ct->ct_net, &init_net);
1571 atomic_set(&ct->ct_general.use, 1);
1573 /* - and make it look like a confirmed connection */
1574 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1578 #ifdef CONFIG_NF_CONNTRACK_ZONES
1579 nf_ct_extend_unregister(&nf_ct_zone_extend);
1582 nf_conntrack_labels_fini();
1584 nf_conntrack_helper_fini();
1586 nf_conntrack_timeout_fini();
1588 nf_conntrack_ecache_fini();
1590 nf_conntrack_tstamp_fini();
1592 nf_conntrack_acct_fini();
1594 nf_conntrack_expect_fini();
1599 void nf_conntrack_init_end(void)
1601 /* For use by REJECT target */
1602 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1603 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1605 /* How to get NAT offsets */
1606 RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
1610 * We need to use special "null" values, not used in hash table
1612 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
1613 #define DYING_NULLS_VAL ((1<<30)+1)
1614 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
1616 int nf_conntrack_init_net(struct net *net)
1620 atomic_set(&net->ct.count, 0);
1621 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1622 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1623 INIT_HLIST_NULLS_HEAD(&net->ct.tmpl, TEMPLATE_NULLS_VAL);
1624 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1625 if (!net->ct.stat) {
1630 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1631 if (!net->ct.slabname) {
1636 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1637 sizeof(struct nf_conn), 0,
1638 SLAB_DESTROY_BY_RCU, NULL);
1639 if (!net->ct.nf_conntrack_cachep) {
1640 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1645 net->ct.htable_size = nf_conntrack_htable_size;
1646 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1647 if (!net->ct.hash) {
1649 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1652 ret = nf_conntrack_expect_pernet_init(net);
1655 ret = nf_conntrack_acct_pernet_init(net);
1658 ret = nf_conntrack_tstamp_pernet_init(net);
1661 ret = nf_conntrack_ecache_pernet_init(net);
1664 ret = nf_conntrack_helper_pernet_init(net);
1667 ret = nf_conntrack_proto_pernet_init(net);
1673 nf_conntrack_helper_pernet_fini(net);
1675 nf_conntrack_ecache_pernet_fini(net);
1677 nf_conntrack_tstamp_pernet_fini(net);
1679 nf_conntrack_acct_pernet_fini(net);
1681 nf_conntrack_expect_pernet_fini(net);
1683 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1685 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1687 kfree(net->ct.slabname);
1689 free_percpu(net->ct.stat);
1694 s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1695 enum ip_conntrack_dir dir,
1696 u32 seq);
1697 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);