// SPDX-License-Identifier: GPL-2.0-only
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/siphash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static siphash_key_t nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
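/* Unlink @exp from the global hash table and from its master's expectation
 * list, report an IPEXP_DESTROY event and drop the hash table's reference.
 * The caller holds nf_conntrack_expect_lock and has already stopped the
 * timeout timer.
 */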
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        struct nf_conntrack_net *cnet;

        WARN_ON(!master_help);
        WARN_ON(timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);

        cnet = nf_ct_pernet(net);
        cnet->expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

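/* Timer callback: the expectation expired without being matched, so unlink
 * it and drop the timer's reference.
 */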
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
        struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

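/* Hash an expectation by the destination part of its tuple (address, port,
 * l3num and l4 protocol), keyed with a per-boot random siphash key and the
 * netns hash so bucket layout differs across namespaces and reboots.
 */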
static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        struct {
                union nf_inet_addr dst_addr;
                u32 net_mix;
                u16 dport;
                u8 l3num;
                u8 protonum;
        } __aligned(SIPHASH_ALIGNMENT) combined;
        u32 hash;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        memset(&combined, 0, sizeof(combined));

        combined.dst_addr = tuple->dst.u3;
        combined.net_mix = net_hash_mix(n);
        combined.dport = (__force __u16)tuple->dst.u.all;
        combined.l3num = tuple->src.l3num;
        combined.protonum = tuple->dst.protonum;

        hash = siphash(&combined, sizeof(combined), &nf_ct_expect_hashrnd);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

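/* Remove an expectation whose timer is still pending.  Returns false if the
 * timer already fired, in which case the timeout handler owns the unlink.
 * The caller holds nf_conntrack_expect_lock.
 */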
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_net *cnet = nf_ct_pernet(net);
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!cnet->expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it gets deleted from
 * the global list, then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_net *cnet = nf_ct_pernet(net);
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!cnet->expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (i.e. the packet
           hasn't left this machine yet), how can the other end know about
           the expected connection?  Hence these are not the droids you
           are looking for (if the master ct never got confirmed, we'd
           hold a reference to it and weird things would happen to future
           packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with other CPUs that, for the exp->master ct, are
         * about to invoke ->destroy(), or nf_ct_delete() via timeout
         * or early_drop().
         *
         * The atomic_inc_not_zero() check tells us: if it fails, we
         * know that the ct is being destroyed.  If it succeeds, we
         * can be sure the ct cannot disappear underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                nf_ct_remove_expect(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* The parts covered by the intersection of the two masks must
           differ, otherwise the expectations clash. */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static bool master_matches(const struct nf_conntrack_expect *a,
                           const struct nf_conntrack_expect *b,
                           unsigned int flags)
{
        if (flags & NF_CT_EXP_F_SKIP_MASTER)
                return true;

        return a->master == b->master;
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_remove_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations.  During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        refcount_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

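/* Fill in @exp's tuple and mask.  A NULL @saddr or @src acts as a wildcard
 * (its mask is cleared); @daddr and @dst are mandatory and matched exactly.
 */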
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#if IS_ENABLED(CONFIG_NF_NAT)
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

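/* Typical helper usage, as a sketch: nf_ct_expect_related() is the inline
 * wrapper from nf_conntrack_expect.h around nf_ct_expect_related_report();
 * "tuple" and "port" stand for helper-local values and are illustrative only.
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (!exp)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &tuple->src.u3, &tuple->dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	ret = nf_ct_expect_related(exp, 0);
 *	nf_ct_expect_put(exp);
 */
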
static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

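/* Link @exp into the global hash table and its master's list, taking two
 * extra references (one for the hash table, one for the timer), and arm the
 * timeout from the helper's per-class expect policy.  Called with
 * nf_conntrack_expect_lock held.
 */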
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conntrack_net *cnet;
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references: one for hash insert, one for the timer */
        refcount_add(2, &exp->use);

        timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        cnet = nf_ct_pernet(net);
        cnet->expect_count++;

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
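/* New entries are added at the list head, so the last entry of @new's class
 * is the oldest one.
 */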
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last)
                nf_ct_remove_expect(last);
}

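/* Validate a new expectation under nf_conntrack_expect_lock: -ESHUTDOWN if
 * the master has no helper extension, -EALREADY if an identical expectation
 * with a different class or master already exists, -EBUSY on a clash, and
 * -EMFILE if the per-helper class limit or the global table limit is hit.
 */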
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect,
                                       unsigned int flags)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conntrack_net *cnet;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (master_matches(i, expect, flags) &&
                    expect_matches(i, expect)) {
                        if (i->class != expect->class ||
                            i->master != expect->master)
                                return -EALREADY;

                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will we be over the limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        cnet = nf_ct_pernet(net);
        if (cnet->expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

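/* Check and register a new expectation.  On success, an IPEXP_NEW event is
 * reported to @portid once the lock has been dropped.
 */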
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report, unsigned int flags)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect, flags);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

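/* Walk the whole expectation table and remove every entry for which @iter
 * returns true.
 */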
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
                                  void *data)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {
                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

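/* Like nf_ct_expect_iterate_destroy(), but restricted to @net and reporting
 * destroy events to @portid.
 */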
void nf_ct_expect_iterate_net(struct net *net,
                              bool (*iter)(struct nf_conntrack_expect *e, void *data),
                              void *data,
                              u32 portid, int report)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {

                        if (!net_eq(nf_ct_exp_net(exp), net))
                                continue;

                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect_report(exp, portid, report);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

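/* seq_file helpers for /proc/net/nf_conntrack_expect: walk the global hash
 * bucket by bucket under rcu_read_lock().
 */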
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_puts(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    nf_ct_l4proto_find(expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

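/* Create /proc/net/nf_conntrack_expect (mode 0440), owned by the root
 * uid/gid of the netns's user namespace when those map.
 */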
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create_net("nf_conntrack_expect", 0440, net->proc_net,
                        &exp_seq_ops, sizeof(struct ct_expect_iter_state));
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

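/* Module init: size the expectation hash at nf_conntrack_htable_size / 256
 * (at least one bucket) unless overridden by the expect_hashsize parameter,
 * and cap the table at four entries per bucket.
 */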
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                sizeof(struct nf_conntrack_expect),
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        kvfree(nf_ct_expect_hash);
}