// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

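/*
 * Copy addresses, protocol numbers and ports for one direction from the
 * conntrack tuple into the flow tuple. TCP and UDP ports share storage in
 * the conntrack union, so u.tcp.port is valid for both protocols.
 */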
static void
flow_offload_fill_dir(struct flow_offload *flow,
                      enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
        struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

        ft->dir = dir;

        switch (ctt->src.l3num) {
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;
                ft->dst_v4 = ctt->dst.u3.in;
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;
                ft->dst_v6 = ctt->dst.u3.in6;
                break;
        }

        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;

        switch (ctt->dst.protonum) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                ft->src_port = ctt->src.u.tcp.port;
                ft->dst_port = ctt->dst.u.tcp.port;
                break;
        }
}

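/*
 * Allocate a flow entry for @ct and fill both directions from its conntrack
 * tuples. Takes a reference on @ct; returns NULL if the conntrack entry is
 * dying or its refcount has already dropped to zero.
 */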
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
        struct flow_offload *flow;

        if (unlikely(nf_ct_is_dying(ct) ||
            !refcount_inc_not_zero(&ct->ct_general.use)))
                return NULL;

        flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
        if (!flow)
                goto err_ct_refcnt;

        flow->ct = ct;

        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

        if (ct->status & IPS_SRC_NAT)
                __set_bit(NF_FLOW_SNAT, &flow->flags);
        if (ct->status & IPS_DST_NAT)
                __set_bit(NF_FLOW_DNAT, &flow->flags);

        return flow;

err_ct_refcnt:
        nf_ct_put(ct);

        return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
        const struct rt6_info *rt;

        if (flow_tuple->l3proto == NFPROTO_IPV6) {
                rt = (const struct rt6_info *)flow_tuple->dst_cache;
                return rt6_get_cookie(rt);
        }

        return 0;
}

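/*
 * Fill in the forwarding state for one direction: path MTU, input interface,
 * encapsulation (e.g. VLAN) tags copied in reverse order, and either the
 * resolved hardware addresses and interfaces (direct xmit) or a cached
 * dst_entry plus its validity cookie (neigh/xfrm xmit). Returns -1 if the
 * dst reference cannot be taken.
 */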
static int flow_offload_fill_route(struct flow_offload *flow,
                                   const struct nf_flow_route *route,
                                   enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
        struct dst_entry *dst = route->tuple[dir].dst;
        int i, j = 0;

        switch (flow_tuple->l3proto) {
        case NFPROTO_IPV4:
                flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
                break;
        case NFPROTO_IPV6:
                flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
                break;
        }

        flow_tuple->iifidx = route->tuple[dir].in.ifindex;
        for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
                flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
                flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
                if (route->tuple[dir].in.ingress_vlans & BIT(i))
                        flow_tuple->in_vlan_ingress |= BIT(j);
                j++;
        }
        flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

        switch (route->tuple[dir].xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
                       ETH_ALEN);
                memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
                       ETH_ALEN);
                flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
                flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
                break;
        case FLOW_OFFLOAD_XMIT_XFRM:
        case FLOW_OFFLOAD_XMIT_NEIGH:
                if (!dst_hold_safe(route->tuple[dir].dst))
                        return -1;

                flow_tuple->dst_cache = dst;
                flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
        flow_tuple->xmit_type = route->tuple[dir].xmit_type;

        return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
                                 enum flow_offload_tuple_dir dir)
{
        if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
            flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
                dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

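/*
 * Set up routing state for both directions of @flow. If the reply direction
 * fails, release the dst reference already taken for the original one.
 */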
int flow_offload_route_init(struct flow_offload *flow,
                            const struct nf_flow_route *route)
{
        int err;

        err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
        if (err < 0)
                return err;

        err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
        if (err < 0)
                goto err_route_reply;

        flow->type = NF_FLOW_OFFLOAD_ROUTE;

        return 0;

err_route_reply:
        nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

        return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
        tcp->state = TCP_CONNTRACK_ESTABLISHED;
        tcp->seen[0].td_maxwin = 0;
        tcp->seen[1].td_maxwin = 0;
}

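/*
 * Rearm the conntrack timeout when a flow leaves the offload path: clamp the
 * remaining lifetime to the regular established/replied timeout minus the
 * offload extension, so the entry does not linger with the long offload
 * timeout still programmed.
 */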
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        int l4num = nf_ct_protonum(ct);
        s32 timeout;

        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);

                timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
                timeout -= tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);

                timeout = tn->timeouts[UDP_CT_REPLIED];
                timeout -= tn->offload_timeout;
        } else {
                return;
        }

        if (timeout < 0)
                timeout = 0;

        if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
                WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
        if (nf_ct_protonum(ct) == IPPROTO_TCP)
                flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
        flow_offload_fixup_ct_state(ct);
        flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
        nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
        nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
        switch (flow->type) {
        case NF_FLOW_OFFLOAD_ROUTE:
                flow_offload_route_release(flow);
                break;
        default:
                break;
        }
        nf_ct_put(flow->ct);
        kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

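/*
 * Flows are keyed by the tuple fields laid out before the __hash marker in
 * struct flow_offload_tuple (addresses, ports, protocols, input interface
 * and encapsulation); direction and routing state behind the marker take no
 * part in hashing or comparison.
 */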
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple *tuple = data;

        return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple_rhash *tuplehash = data;

        return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
                                        const void *ptr)
{
        const struct flow_offload_tuple *tuple = arg->key;
        const struct flow_offload_tuple_rhash *x = ptr;

        if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
                return 1;

        return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
        .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
        .hashfn                 = flow_offload_hash,
        .obj_hashfn             = flow_offload_hash_obj,
        .obj_cmpfn              = flow_offload_hash_cmp,
        .automatic_shrinking    = true,
};

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
        unsigned long timeout = NF_FLOW_TIMEOUT;
        struct net *net = nf_ct_net(flow->ct);
        int l4num = nf_ct_protonum(flow->ct);

        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);

                timeout = tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);

                timeout = tn->offload_timeout;
        }

        return timeout;
}

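/*
 * Insert both tuple directions into the flow table, extend the backing
 * conntrack timeout so it cannot expire while the flow is offloaded, and
 * schedule the hardware offload work if the table supports it.
 */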
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
        int err;

        flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[1].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0) {
                rhashtable_remove_fast(&flow_table->rhashtable,
                                       &flow->tuplehash[0].node,
                                       nf_flow_offload_rhash_params);
                return err;
        }

        nf_ct_offload_timeout(flow->ct);

        if (nf_flowtable_hw_offload(flow_table)) {
                __set_bit(NF_FLOW_HW, &flow->flags);
                nf_flow_offload_add(flow_table, flow);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

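/*
 * Called from the datapath for packets matching an offloaded flow: push the
 * flow timeout forward and, on hardware-offloaded tables, re-queue the
 * offload work item.
 */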
void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow)
{
        u32 timeout;

        timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
        if (READ_ONCE(flow->timeout) != timeout)
                WRITE_ONCE(flow->timeout, timeout);

        if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;

        nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
        return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
{
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);

        clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

        if (nf_flow_has_expired(flow))
                flow_offload_fixup_ct(flow->ct);
        else
                flow_offload_fixup_ct_timeout(flow->ct);

        flow_offload_free(flow);
}

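/*
 * Mark the flow for removal by the garbage collector and put the TCP
 * conntrack state back into shape so the connection can continue on the
 * classic slow path.
 */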
void flow_offload_teardown(struct flow_offload *flow)
{
        set_bit(NF_FLOW_TEARDOWN, &flow->flags);

        flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
                    struct flow_offload_tuple *tuple)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
        int dir;

        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
                                      nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
                return NULL;

        if (unlikely(nf_ct_is_dying(flow->ct)))
                return NULL;

        return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

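/*
 * Walk all flows in the table, invoking @iter once per flow; only the
 * original-direction tuplehash is visited so each flow is seen exactly once.
 * -EAGAIN from the rhashtable walker (a resize is in progress) is not fatal
 * and the walk simply continues.
 */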
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
                      void (*iter)(struct nf_flowtable *flowtable,
                                   struct flow_offload *flow, void *data),
                      void *data)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
        int err = 0;

        rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);

        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
                        if (PTR_ERR(tuplehash) != -EAGAIN) {
                                err = PTR_ERR(tuplehash);
                                break;
                        }
                        continue;
                }
                if (tuplehash->tuple.dir)
                        continue;

                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

                iter(flow_table, flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        return err;
}

static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
        struct dst_entry *dst;

        if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
            tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
                dst = tuple->dst_cache;
                if (!dst_check(dst, tuple->dst_cookie))
                        return true;
        }

        return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
        return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
               flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}

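/*
 * Per-flow garbage collection step: mark the flow for teardown if it has
 * expired, its conntrack entry is dying or a cached route went stale. For
 * hardware-offloaded flows removal happens in two phases: request deletion
 * from hardware first, then free the flow only once the driver has reported
 * it dead. Live hardware flows get their stats refreshed instead.
 */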
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
                                    struct flow_offload *flow, void *data)
{
        if (nf_flow_has_expired(flow) ||
            nf_ct_is_dying(flow->ct) ||
            nf_flow_has_stale_dst(flow))
                set_bit(NF_FLOW_TEARDOWN, &flow->flags);

        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
                if (test_bit(NF_FLOW_HW, &flow->flags)) {
                        if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
                                nf_flow_offload_del(flow_table, flow);
                        else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
                                flow_offload_del(flow_table, flow);
                } else {
                        flow_offload_del(flow_table, flow);
                }
        } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
                nf_flow_offload_stats(flow_table, flow);
        }
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
        struct nf_flowtable *flow_table;

        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                 __be16 port, __be16 new_port)
{
        struct tcphdr *tcph;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
                                 __be16 port, __be16 new_port)
{
        struct udphdr *udph;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace2(&udph->check, skb, port,
                                         new_port, false);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
                             u8 protocol, __be16 port, __be16 new_port)
{
        switch (protocol) {
        case IPPROTO_TCP:
                nf_flow_nat_port_tcp(skb, thoff, port, new_port);
                break;
        case IPPROTO_UDP:
                nf_flow_nat_port_udp(skb, thoff, port, new_port);
                break;
        }
}

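/*
 * Apply the source NAT port mapping recorded in the flow: rewrite the source
 * port on the original path (destination port on the reply path) using the
 * opposite direction's tuple, then fix up the transport checksum.
 * nf_flow_dnat_port() below is the destination NAT counterpart.
 */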
void nf_flow_snat_port(const struct flow_offload *flow,
                       struct sk_buff *skb, unsigned int thoff,
                       u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                hdr->source = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                hdr->dest = new_port;
                break;
        }

        nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
                       unsigned int thoff, u8 protocol,
                       enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
                hdr->dest = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
                hdr->source = new_port;
                break;
        }

        nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

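/*
 * Initialize a flow table: set up the rhashtable and the offload flow block,
 * kick off the periodic garbage collection (once per second) and register
 * the table on the global list used for device-driven cleanup.
 */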
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
        int err;

        INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
        flow_block_init(&flowtable->flow_block);
        init_rwsem(&flowtable->flow_block_lock);

        err = rhashtable_init(&flowtable->rhashtable,
                              &nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        queue_delayed_work(system_power_efficient_wq,
                           &flowtable->gc_work, HZ);

        mutex_lock(&flowtable_lock);
        list_add(&flowtable->list, &flowtables);
        mutex_unlock(&flowtable_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
                                     struct flow_offload *flow, void *data)
{
        struct net_device *dev = data;

        if (!dev) {
                flow_offload_teardown(flow);
                return;
        }

        if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
                              struct net_device *dev)
{
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
        nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
        struct nf_flowtable *flowtable;

        mutex_lock(&flowtable_lock);
        list_for_each_entry(flowtable, &flowtables, list)
                nf_flow_table_gc_cleanup(flowtable, dev);
        mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

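/*
 * Release a flow table: unlink it from the global list, stop the garbage
 * collector, tear down and free all remaining flows, and flush pending
 * hardware offload work; on hardware-offloaded tables the GC step runs once
 * more so flows whose hardware removal completed during the flush are freed
 * as well. Finally destroy the rhashtable.
 */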
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
        mutex_lock(&flowtable_lock);
        list_del(&flow_table->list);
        mutex_unlock(&flowtable_lock);

        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        nf_flow_table_offload_flush(flow_table);
        if (nf_flowtable_hw_offload(flow_table))
                nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
        rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
        return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
        nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");