// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

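/*
 * Copy one direction of the conntrack tuple into the flow tuple. The
 * src.u/dst.u members are a union, so reading u.tcp.port is also valid
 * for UDP; only the l4proto field distinguishes the two here.
 */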
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}

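/*
 * Allocate a flow entry for @ct and take a reference on the conntrack
 * entry; GFP_ATOMIC is used because this runs from the packet path.
 * Returns NULL if the conntrack entry is dying or its refcount has
 * already dropped to zero.
 */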
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

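/*
 * Cache the routing information for one direction: MTU, input interface,
 * the encapsulation stack and the transmit path. The encap entries are
 * copied in reverse order relative to how the input path recorded them.
 * For the NEIGH/XFRM transmit paths a reference on the dst entry is
 * taken and kept in dst_cache, along with a cookie to detect staleness.
 */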
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

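/*
 * When a flow is handed back to the slow path, give conntrack a fresh
 * pickup timeout so the connection is not reaped before classic
 * tracking takes over again. The remaining timeout is only ever
 * shortened here, never extended.
 */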
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_pickup;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_pickup;
	} else {
		return;
	}

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

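/*
 * Hashing and comparison only cover the fields laid out before the
 * __hash marker in struct flow_offload_tuple; the direction, transmit
 * state and cached route that follow it are not part of the lookup key.
 */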
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	const struct nf_conntrack_l4proto *l4proto;
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

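/*
 * Insert both direction tuples into the flowtable hash. If the reply
 * insertion fails, the original direction is rolled back so the table
 * never holds a half-inserted flow. On success the conntrack timeout
 * is bumped and, if the table supports it, hardware offload is
 * requested.
 */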
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

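/*
 * Look up a flow by tuple. Flows already marked for teardown, or whose
 * conntrack entry is dying, are treated as misses so the packet falls
 * back to the classic forwarding path.
 */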
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

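/*
 * Walk all flows in the table. -EAGAIN from the rhashtable walker
 * signals a table resize; the walk continues, although entries may then
 * be seen more than once. The callback is only invoked for the original
 * direction tuple, so each flow is visited once rather than once per
 * direction.
 */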
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	struct dst_entry *dst;

	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		dst = tuple->dst_cache;
		if (!dst_check(dst, tuple->dst_cookie))
			return true;
	}

	return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}

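/*
 * Garbage collection step, run once per flow per GC pass. Expired,
 * dying or stale-route flows are flagged for teardown. For hardware-
 * offloaded flows, teardown is a two-step handshake with the driver:
 * NF_FLOW_HW_DYING is set while the hardware entry is being removed,
 * and the software entry is only freed once NF_FLOW_HW_DEAD confirms
 * the removal completed. Live hardware flows get a stats refresh
 * instead.
 */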
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

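/*
 * UDP checksums are optional over IPv4: a zero checksum means "not
 * computed", so after mangling, a checksum that folds to zero must be
 * rewritten as CSUM_MANGLED_0 (0xffff) to stay distinguishable.
 */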
static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

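/*
 * Rewrite the source port for SNAT. In the original direction the new
 * source port comes from the reply tuple's destination port; in the
 * reply direction the new destination port comes from the original
 * tuple's source port. The layer 4 checksum is fixed up afterwards.
 */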
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

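/*
 * Initialise a flowtable: set up the hash table and flow block, start
 * the periodic (one second) garbage collector and register the table
 * on the global list used for device cleanup.
 */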
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

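/*
 * Tear down a flowtable. After the GC worker is cancelled, every
 * remaining flow is marked for teardown and reaped via the GC step.
 * When hardware offload is in use, a second GC pass runs after the
 * offload workqueue flush so that entries whose hardware removal
 * completed during the flush can be freed as well.
 */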
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");