/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	WARN_ON(!master_help);
	WARN_ON(timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del_rcu(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
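
/* Timer callback: the expectation was never matched before its timeout
 * expired.  Unlink it under the expect lock and drop the reference that
 * the timer held.
 */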
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
	struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}
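
/* Hash an expectation by its destination tuple: jhash2 over the
 * destination address, keyed with the protocol numbers and a per-boot
 * random seed mixed with the netns hash, then scaled onto the table
 * via reciprocal_scale().
 */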
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash, seed;

	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ seed);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}
static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_expect *i,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
	       net_eq(net, nf_ct_net(i->master)) &&
	       nf_ct_zone_equal_any(i->master, zone);
}
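
/* Remove an expectation if it is still pending.  del_timer() doubles as
 * the ownership test: only the caller that successfully deactivates the
 * timer unlinks the expectation, so concurrent removers cannot unlink it
 * twice.  Returns true if this caller removed it.
 */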
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);
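
/* Lockless lookup; the caller must hold rcu_read_lock() and must take a
 * reference before using the result outside the read-side section.
 */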
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_exp_equal(tuple, i, zone, net))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !refcount_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(net, tuple);
	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_exp_equal(tuple, i, zone, net)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (ie. the packet hasn't
	   left this machine yet), how could the other end know about the
	   expectation?  Hence these are not the droids you are looking for
	   (if the master ct never got confirmed, we'd hold a reference to
	   it and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with other CPUs that, for the exp->master ct, are
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * If atomic_inc_not_zero() fails, we know the ct is being
	 * destroyed.  If it succeeds, the ct cannot disappear
	 * underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		refcount_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		nf_ct_remove_expect(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The part covered by the intersection of the masks must be
	   unequal, otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
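
/* Exact-duplicate test: same master, class, tuple and mask within the
 * same netns and zone.  Used below to detect re-registration of an
 * identical expectation, which replaces the old entry.
 */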
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_remove_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	refcount_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
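
/* Fill in tuple and mask from the given addresses and ports.  A NULL
 * saddr or src acts as a wildcard: the corresponding mask is zeroed so
 * any value matches.  Trailing address bytes beyond the family's length
 * are cleared so nf_ct_tuple_equal() sees a canonical value.
 */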
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (refcount_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
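
/* Insert into the global hash and the master's per-helper list and arm
 * the timeout timer.  The caller must hold nf_conntrack_expect_lock.
 */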
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

	/* two references: one for the hash insert, one for the timer */
	refcount_add(2, &exp->use);

	timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
	net->ct.expect_count++;

	NF_CT_STAT_INC(net, expect_create);
}
/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last)
		nf_ct_remove_expect(last);
}
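
/* Validate a new expectation before insertion: an identical expectation
 * is removed and replaced; a clashing one rejects the new entry with
 * -EBUSY; the per-helper class limit is enforced by evicting the oldest
 * expectation of that class; finally the global table cap is applied.
 */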
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 0;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(net, &expect->tuple);
	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (nf_ct_remove_expect(i))
				break;
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will we be over the limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
						>= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret < 0)
		goto out;

	nf_ct_expect_insert(expect);

	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return 0;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
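
/* Walk the whole expectation table under the expect lock and remove
 * every entry for which iter() returns true, regardless of namespace.
 */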
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
				  void *data)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {
			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect(exp);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);
void nf_ct_expect_iterate_net(struct net *net,
			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
			      void *data,
			      u32 portid, int report)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *next;
	unsigned int i;

	spin_lock_bh(&nf_conntrack_expect_lock);

	for (i = 0; i < nf_ct_expect_hsize; i++) {
		hlist_for_each_entry_safe(exp, next,
					  &nf_ct_expect_hash[i],
					  hnode) {

			if (!net_eq(nf_ct_exp_net(exp), net))
				continue;

			if (iter(exp, data) && del_timer(&exp->timeout)) {
				nf_ct_unlink_expect_report(exp, portid, report);
				nf_ct_expect_put(exp);
			}
		}
	}

	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);
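
/* /proc/net/nf_conntrack_expect: a seq_file walk over the expectation
 * hash under RCU, one line per expectation.
 */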
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
	}
	return head;
}
static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_puts(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_puts(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name[0])
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}
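
/* Illustrative line produced by exp_seq_show() above (field values are
 * hypothetical): remaining timeout in seconds, l3proto/proto numbers,
 * the expected tuple, then flags and helper name, e.g.
 *   "299 l3proto = 2 proto=6 src=... dst=... PERMANENT ftp"
 */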
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;
	kgid_t root_gid;
	kuid_t root_uid;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
int nf_conntrack_expect_pernet_init(struct net *net)
{
	net->ct.expect_count = 0;
	return exp_proc_init(net);
}
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
}
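
/* Table sizing: unless overridden via the expect_hashsize module
 * parameter, the expectation hash is 1/256th of the conntrack hash
 * size, and the global cap nf_ct_expect_max is four entries per bucket.
 */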
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
				sizeof(struct nf_conntrack_expect),
				0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (!nf_ct_expect_hash) {
		kmem_cache_destroy(nf_ct_expect_cachep);
		return -ENOMEM;
	}

	return 0;
}
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}