/* net/netfilter/nf_flow_table_offload.c */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

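/* Offload requests are serviced asynchronously from dedicated workqueues,
 * one per command type (add, delete, stats refresh); keeping them separate
 * presumably lets one class of requests be flushed or backlogged without
 * holding up the others.
 */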
static struct workqueue_struct *nf_flow_offload_add_wq;
static struct workqueue_struct *nf_flow_offload_del_wq;
static struct workqueue_struct *nf_flow_offload_stats_wq;

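/* One unit of deferred work: which flowtable and flow it applies to, and
 * which flow_cls_command (replace, destroy or stats) to issue to the
 * registered block callbacks.
 */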
struct flow_offload_work {
        struct list_head        list;
        enum flow_cls_command   cmd;
        int                     priority;
        struct nf_flowtable     *flowtable;
        struct flow_offload     *flow;
        struct work_struct      work;
};

#define NF_FLOW_DISSECTOR(__match, __type, __field)     \
        (__match)->dissector.offset[__type] =           \
                offsetof(struct nf_flow_key, __field)

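/* Populate the tunnel match keys when the reverse direction transmits
 * through an encapsulating dst: match on the tunnel id and, depending on
 * the address family, on the outer IPv4/IPv6 endpoints. Source and
 * destination are swapped because the match describes packets received on
 * the tunnel, i.e. the reverse of the TX tunnel metadata.
 */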
static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
                                   struct ip_tunnel_info *tun_info)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        unsigned int enc_keys;

        if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
                return;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
        key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
        mask->enc_key_id.keyid = 0xffffffff;
        enc_keys = BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
                   BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL);

        if (ip_tunnel_info_af(tun_info) == AF_INET) {
                NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                  enc_ipv4);
                key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
                key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
                if (key->enc_ipv4.src)
                        mask->enc_ipv4.src = 0xffffffff;
                if (key->enc_ipv4.dst)
                        mask->enc_ipv4.dst = 0xffffffff;
                enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        } else {
                memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
                       sizeof(struct in6_addr));
                memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
                       sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.src, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.src, 0xff,
                               sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.dst, 0xff,
                               sizeof(struct in6_addr));
                enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        }

        match->dissector.used_keys |= enc_keys;
}

static void nf_flow_rule_vlan_match(struct flow_dissector_key_vlan *key,
                                    struct flow_dissector_key_vlan *mask,
                                    u16 vlan_id, __be16 proto)
{
        key->vlan_id = vlan_id;
        mask->vlan_id = VLAN_VID_MASK;
        key->vlan_tpid = proto;
        mask->vlan_tpid = 0xffff;
}

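/* Build the flow dissector match (key and mask) for one direction of the
 * flow: ingress interface, optional VLAN/QinQ tags, L3 addresses and L4
 * ports. Only TCP and UDP flows can be expressed; anything else returns
 * -EOPNOTSUPP.
 */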
static int nf_flow_rule_match(struct nf_flow_match *match,
                              const struct flow_offload_tuple *tuple,
                              struct dst_entry *other_dst)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        struct ip_tunnel_info *tun_info;
        bool vlan_encap = false;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

        if (other_dst && other_dst->lwtstate) {
                tun_info = lwt_tun_info(other_dst->lwtstate);
                nf_flow_rule_lwt_match(match, tun_info);
        }

        if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
                key->meta.ingress_ifindex = tuple->tc.iifidx;
        else
                key->meta.ingress_ifindex = tuple->iifidx;

        mask->meta.ingress_ifindex = 0xffffffff;

        if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
            tuple->encap[0].proto == htons(ETH_P_8021Q)) {
                NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN, vlan);
                nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
                                        tuple->encap[0].id,
                                        tuple->encap[0].proto);
                vlan_encap = true;
        }

        if (tuple->encap_num > 1 && !(tuple->in_vlan_ingress & BIT(1)) &&
            tuple->encap[1].proto == htons(ETH_P_8021Q)) {
                if (vlan_encap) {
                        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CVLAN,
                                          cvlan);
                        nf_flow_rule_vlan_match(&key->cvlan, &mask->cvlan,
                                                tuple->encap[1].id,
                                                tuple->encap[1].proto);
                } else {
                        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN,
                                          vlan);
                        nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
                                                tuple->encap[1].id,
                                                tuple->encap[1].proto);
                }
        }

        switch (tuple->l3proto) {
        case AF_INET:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                key->basic.n_proto = htons(ETH_P_IP);
                key->ipv4.src = tuple->src_v4.s_addr;
                mask->ipv4.src = 0xffffffff;
                key->ipv4.dst = tuple->dst_v4.s_addr;
                mask->ipv4.dst = 0xffffffff;
                break;
        case AF_INET6:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                key->basic.n_proto = htons(ETH_P_IPV6);
                key->ipv6.src = tuple->src_v6;
                memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
                key->ipv6.dst = tuple->dst_v6;
                memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
                break;
        default:
                return -EOPNOTSUPP;
        }
        mask->control.addr_type = 0xffff;
        match->dissector.used_keys |= BIT(key->control.addr_type);
        mask->basic.n_proto = 0xffff;

        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                key->tcp.flags = 0;
                mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
                break;
        default:
                return -EOPNOTSUPP;
        }

        key->basic.ip_proto = tuple->l4proto;
        mask->basic.ip_proto = 0xff;

        key->tp.src = tuple->src_port;
        mask->tp.src = 0xffff;
        key->tp.dst = tuple->dst_port;
        mask->tp.dst = 0xffff;

        match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
                                      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                      BIT(FLOW_DISSECTOR_KEY_BASIC) |
                                      BIT(FLOW_DISSECTOR_KEY_PORTS);
        return 0;
}

static void flow_offload_mangle(struct flow_action_entry *entry,
                                enum flow_action_mangle_base htype, u32 offset,
                                const __be32 *value, const __be32 *mask)
{
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.offset = offset;
        memcpy(&entry->mangle.mask, mask, sizeof(u32));
        memcpy(&entry->mangle.val, value, sizeof(u32));
}

static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
        int i = flow_rule->rule->action.num_entries++;

        return &flow_rule->rule->action.entries[i];
}

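/* Rewrite the Ethernet source address to that of the egress device: either
 * the cached out.h_source (direct xmit) or the MAC of the device looked up
 * via the reverse tuple's ifindex (neigh xmit). The 6-byte address is
 * expressed as two 32-bit mangles on the Ethernet header at offsets 4 and 8.
 */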
static int flow_offload_eth_src(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        const struct flow_offload_tuple *other_tuple, *this_tuple;
        struct net_device *dev = NULL;
        const unsigned char *addr;
        u32 mask, val;
        u16 val16;

        this_tuple = &flow->tuplehash[dir].tuple;

        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                addr = this_tuple->out.h_source;
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                dev = dev_get_by_index(net, other_tuple->iifidx);
                if (!dev)
                        return -ENOENT;

                addr = dev->dev_addr;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mask = ~0xffff0000;
        memcpy(&val16, addr, 2);
        val = val16 << 16;
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        mask = ~0xffffffff;
        memcpy(&val, addr + 2, 4);
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
                            &val, &mask);

        dev_put(dev);

        return 0;
}

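/* Rewrite the Ethernet destination address: either the cached out.h_dest
 * (direct xmit) or the next hop's hardware address from a neighbour lookup
 * on the cached route. Fails with -ENOENT if the neighbour entry is missing
 * or not yet valid.
 */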
static int flow_offload_eth_dst(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        const struct flow_offload_tuple *other_tuple, *this_tuple;
        const struct dst_entry *dst_cache;
        unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        const void *daddr;
        u32 mask, val;
        u8 nud_state;
        u16 val16;

        this_tuple = &flow->tuplehash[dir].tuple;

        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                ether_addr_copy(ha, this_tuple->out.h_dest);
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                daddr = &other_tuple->src_v4;
                dst_cache = this_tuple->dst_cache;
                n = dst_neigh_lookup(dst_cache, daddr);
                if (!n)
                        return -ENOENT;

                read_lock_bh(&n->lock);
                nud_state = n->nud_state;
                ether_addr_copy(ha, n->ha);
                read_unlock_bh(&n->lock);
                neigh_release(n);

                if (!(nud_state & NUD_VALID))
                        return -ENOENT;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mask = ~0xffffffff;
        memcpy(&val, ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                            &val, &mask);

        mask = ~0x0000ffff;
        memcpy(&val16, ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        return 0;
}

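/* The SNAT/DNAT helpers below emit mangle actions that rewrite the IPv4 or
 * IPv6 addresses in the packet header. The replacement address is taken
 * from the opposite direction's tuple: e.g. for SNAT in the original
 * direction, the new source address is the reply tuple's destination.
 */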
static void flow_offload_ipv4_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

static void flow_offload_ipv4_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     unsigned int offset,
                                     const __be32 *addr, const __be32 *mask)
{
        struct flow_action_entry *entry;
        int i, j;

        for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
                                    offset + i, &addr[j], mask);
        }
}

static void flow_offload_ipv6_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static void flow_offload_ipv6_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static int flow_offload_l4proto(const struct flow_offload *flow)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        u8 type = 0;

        switch (protonum) {
        case IPPROTO_TCP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
                break;
        case IPPROTO_UDP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
                break;
        default:
                break;
        }

        return type;
}

static void flow_offload_port_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_port_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_ipv4_checksum(struct net *net,
                                       const struct flow_offload *flow,
                                       struct nf_flow_rule *flow_rule)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

        entry->id = FLOW_ACTION_CSUM;
        entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

        switch (protonum) {
        case IPPROTO_TCP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
                break;
        case IPPROTO_UDP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
                break;
        }
}

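/* Append a redirect action towards the egress device: the cached hardware
 * ifindex for direct transmission, otherwise the reverse tuple's ingress
 * ifindex. The device reference taken here is dropped again in
 * __nf_flow_offload_destroy().
 */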
static void flow_offload_redirect(struct net *net,
                                  const struct flow_offload *flow,
                                  enum flow_offload_tuple_dir dir,
                                  struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *this_tuple, *other_tuple;
        struct flow_action_entry *entry;
        struct net_device *dev;
        int ifindex;

        this_tuple = &flow->tuplehash[dir].tuple;
        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                this_tuple = &flow->tuplehash[dir].tuple;
                ifindex = this_tuple->out.hw_ifidx;
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                ifindex = other_tuple->iifidx;
                break;
        default:
                return;
        }

        dev = dev_get_by_index(net, ifindex);
        if (!dev)
                return;

        entry = flow_action_entry_next(flow_rule);
        entry->id = FLOW_ACTION_REDIRECT;
        entry->dev = dev;
}

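/* If a direction's route carries TX tunnel metadata, add a tunnel encap
 * action (this direction transmits through the tunnel) or a tunnel decap
 * action (the reverse direction transmits through a tunnel, so packets in
 * this direction arrive encapsulated).
 */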
static void flow_offload_encap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *this_tuple;
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        this_tuple = &flow->tuplehash[dir].tuple;
        if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
                return;

        dst = this_tuple->dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_ENCAP;
                        entry->tunnel = tun_info;
                }
        }
}

static void flow_offload_decap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *other_tuple;
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        other_tuple = &flow->tuplehash[!dir].tuple;
        if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
                return;

        dst = other_tuple->dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
                }
        }
}

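/* Actions shared by the IPv4 and IPv6 rule builders: tunnel decap/encap,
 * Ethernet source/destination rewrite, popping the VLAN tags seen on
 * ingress and pushing the VLAN/PPPoE encapsulation required on egress
 * (taken from the reverse tuple).
 */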
static int
nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
                          enum flow_offload_tuple_dir dir,
                          struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *other_tuple;
        const struct flow_offload_tuple *tuple;
        int i;

        flow_offload_decap_tunnel(flow, dir, flow_rule);
        flow_offload_encap_tunnel(flow, dir, flow_rule);

        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        tuple = &flow->tuplehash[dir].tuple;

        for (i = 0; i < tuple->encap_num; i++) {
                struct flow_action_entry *entry;

                if (tuple->in_vlan_ingress & BIT(i))
                        continue;

                if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_VLAN_POP;
                }
        }

        other_tuple = &flow->tuplehash[!dir].tuple;

        for (i = 0; i < other_tuple->encap_num; i++) {
                struct flow_action_entry *entry;

                if (other_tuple->in_vlan_ingress & BIT(i))
                        continue;

                entry = flow_action_entry_next(flow_rule);

                switch (other_tuple->encap[i].proto) {
                case htons(ETH_P_PPP_SES):
                        entry->id = FLOW_ACTION_PPPOE_PUSH;
                        entry->pppoe.sid = other_tuple->encap[i].id;
                        break;
                case htons(ETH_P_8021Q):
                        entry->id = FLOW_ACTION_VLAN_PUSH;
                        entry->vlan.vid = other_tuple->encap[i].id;
                        entry->vlan.proto = other_tuple->encap[i].proto;
                        break;
                }
        }

        return 0;
}

int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv4_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
            test_bit(NF_FLOW_DNAT, &flow->flags))
                flow_offload_ipv4_checksum(net, flow, flow_rule);

        flow_offload_redirect(net, flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);

int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv6_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }

        flow_offload_redirect(net, flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);

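/* Build the flow_rule for one direction of a flow: allocate space for up to
 * NF_FLOW_RULE_ACTION_MAX action entries, fill in the match from the tuple
 * (passing the reverse direction's dst for tunnel matching) and let the
 * flowtable type callback populate the action list.
 */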
#define NF_FLOW_RULE_ACTION_MAX 16

static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
                           const struct flow_offload_work *offload,
                           enum flow_offload_tuple_dir dir)
{
        const struct nf_flowtable *flowtable = offload->flowtable;
        const struct flow_offload_tuple *tuple, *other_tuple;
        const struct flow_offload *flow = offload->flow;
        struct dst_entry *other_dst = NULL;
        struct nf_flow_rule *flow_rule;
        int err = -ENOMEM;

        flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
        if (!flow_rule)
                goto err_flow;

        flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
        if (!flow_rule->rule)
                goto err_flow_rule;

        flow_rule->rule->match.dissector = &flow_rule->match.dissector;
        flow_rule->rule->match.mask = &flow_rule->match.mask;
        flow_rule->rule->match.key = &flow_rule->match.key;

        tuple = &flow->tuplehash[dir].tuple;
        other_tuple = &flow->tuplehash[!dir].tuple;
        if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
                other_dst = other_tuple->dst_cache;

        err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
        if (err < 0)
                goto err_flow_match;

        flow_rule->rule->action.num_entries = 0;
        if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
                goto err_flow_match;

        return flow_rule;

err_flow_match:
        kfree(flow_rule->rule);
err_flow_rule:
        kfree(flow_rule);
err_flow:
        return NULL;
}

static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
                entry = &flow_rule->rule->action.entries[i];
                if (entry->id != FLOW_ACTION_REDIRECT)
                        continue;

                dev_put(entry->dev);
        }
        kfree(flow_rule->rule);
        kfree(flow_rule);
}

static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
        int i;

        for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
                __nf_flow_offload_destroy(flow_rule[i]);
}

static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        struct net *net = read_pnet(&offload->flowtable->net);

        flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_ORIGINAL);
        if (!flow_rule[0])
                return -ENOMEM;

        flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_REPLY);
        if (!flow_rule[1]) {
                __nf_flow_offload_destroy(flow_rule[0]);
                return -ENOMEM;
        }

        return 0;
}

static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
                                 __be16 proto, int priority,
                                 enum flow_cls_command cmd,
                                 const struct flow_offload_tuple *tuple,
                                 struct netlink_ext_ack *extack)
{
        cls_flow->common.protocol = proto;
        cls_flow->common.prio = priority;
        cls_flow->common.extack = extack;
        cls_flow->command = cmd;
        cls_flow->cookie = (unsigned long)tuple;
}

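/* Issue one flow_cls_offload command (replace, destroy or stats) for a
 * single direction to every callback registered on the flowtable's flow
 * block. Returns the number of callbacks that accepted the command; the
 * tuple pointer doubles as the classifier cookie.
 */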
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
                                 struct flow_offload *flow,
                                 struct nf_flow_rule *flow_rule,
                                 enum flow_offload_tuple_dir dir,
                                 int priority, int cmd,
                                 struct flow_stats *stats,
                                 struct list_head *block_cb_list)
{
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;
        int err, i = 0;

        nf_flow_offload_init(&cls_flow, proto, priority, cmd,
                             &flow->tuplehash[dir].tuple, &extack);
        if (cmd == FLOW_CLS_REPLACE)
                cls_flow.rule = flow_rule->rule;

        down_read(&flowtable->flow_block_lock);
        list_for_each_entry(block_cb, block_cb_list, list) {
                err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
                                   block_cb->cb_priv);
                if (err < 0)
                        continue;

                i++;
        }
        up_read(&flowtable->flow_block_lock);

        if (cmd == FLOW_CLS_STATS)
                memcpy(stats, &cls_flow.stats, sizeof(*stats));

        return i;
}

static int flow_offload_tuple_add(struct flow_offload_work *offload,
                                  struct nf_flow_rule *flow_rule,
                                  enum flow_offload_tuple_dir dir)
{
        return nf_flow_offload_tuple(offload->flowtable, offload->flow,
                                     flow_rule, dir, offload->priority,
                                     FLOW_CLS_REPLACE, NULL,
                                     &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
                                   enum flow_offload_tuple_dir dir)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->priority, FLOW_CLS_DESTROY, NULL,
                              &offload->flowtable->flow_block.cb_list);
}

static int flow_offload_rule_add(struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        int ok_count = 0;

        ok_count += flow_offload_tuple_add(offload, flow_rule[0],
                                           FLOW_OFFLOAD_DIR_ORIGINAL);
        ok_count += flow_offload_tuple_add(offload, flow_rule[1],
                                           FLOW_OFFLOAD_DIR_REPLY);
        if (ok_count == 0)
                return -ENOENT;

        return 0;
}

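/* Workqueue handlers. Add builds the rules for both directions and marks
 * the conntrack entry as hardware offloaded if at least one callback
 * accepted them; del tears both directions down and flags the flow as dead
 * in hardware; stats pulls counters from the drivers, extends the flow
 * timeout and, if the flowtable keeps counters, feeds them into conntrack
 * accounting.
 */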
static void flow_offload_work_add(struct flow_offload_work *offload)
{
        struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
        int err;

        err = nf_flow_offload_alloc(offload, flow_rule);
        if (err < 0)
                return;

        err = flow_offload_rule_add(offload, flow_rule);
        if (err < 0)
                goto out;

        set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);

out:
        nf_flow_offload_destroy(flow_rule);
}

static void flow_offload_work_del(struct flow_offload_work *offload)
{
        clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
        set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}

static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                                     enum flow_offload_tuple_dir dir,
                                     struct flow_stats *stats)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->priority, FLOW_CLS_STATS, stats,
                              &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_work_stats(struct flow_offload_work *offload)
{
        struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
        u64 lastused;

        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);

        lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
        offload->flow->timeout = max_t(u64, offload->flow->timeout,
                                       lastused + flow_offload_get_timeout(offload->flow));

        if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
                if (stats[0].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_ORIGINAL,
                                       stats[0].pkts, stats[0].bytes);
                if (stats[1].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_REPLY,
                                       stats[1].pkts, stats[1].bytes);
        }
}

static void flow_offload_work_handler(struct work_struct *work)
{
        struct flow_offload_work *offload;

        offload = container_of(work, struct flow_offload_work, work);
        switch (offload->cmd) {
        case FLOW_CLS_REPLACE:
                flow_offload_work_add(offload);
                break;
        case FLOW_CLS_DESTROY:
                flow_offload_work_del(offload);
                break;
        case FLOW_CLS_STATS:
                flow_offload_work_stats(offload);
                break;
        default:
                WARN_ON_ONCE(1);
        }

        clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
        kfree(offload);
}

static void flow_offload_queue_work(struct flow_offload_work *offload)
{
        if (offload->cmd == FLOW_CLS_REPLACE)
                queue_work(nf_flow_offload_add_wq, &offload->work);
        else if (offload->cmd == FLOW_CLS_DESTROY)
                queue_work(nf_flow_offload_del_wq, &offload->work);
        else
                queue_work(nf_flow_offload_stats_wq, &offload->work);
}

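/* Allocate and initialise a deferred offload request. Only one request may
 * be in flight per flow: NF_FLOW_HW_PENDING acts as the gate and is cleared
 * again by the work handler (or here on allocation failure).
 */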
static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
                           struct flow_offload *flow, unsigned int cmd)
{
        struct flow_offload_work *offload;

        if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
                return NULL;

        offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload) {
                clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
                return NULL;
        }

        offload->cmd = cmd;
        offload->flow = flow;
        offload->priority = flowtable->priority;
        offload->flowtable = flowtable;
        INIT_WORK(&offload->work, flow_offload_work_handler);

        return offload;
}

void nf_flow_offload_add(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

void nf_flow_offload_del(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
        if (!offload)
                return;

        set_bit(NF_FLOW_HW_DYING, &flow->flags);
        flow_offload_queue_work(offload);
}

void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
{
        struct flow_offload_work *offload;
        __s32 delta;

        delta = nf_flow_timeout_delta(flow->timeout);
        if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
                return;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
        if (nf_flowtable_hw_offload(flowtable)) {
                flush_workqueue(nf_flow_offload_add_wq);
                flush_workqueue(nf_flow_offload_del_wq);
                flush_workqueue(nf_flow_offload_stats_wq);
        }
}

static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
                                     struct flow_block_offload *bo,
                                     enum flow_block_command cmd)
{
        struct flow_block_cb *block_cb, *next;
        int err = 0;

        switch (cmd) {
        case FLOW_BLOCK_BIND:
                list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
                break;
        case FLOW_BLOCK_UNBIND:
                list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                        list_del(&block_cb->list);
                        flow_block_cb_free(block_cb);
                }
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
                                             struct net *net,
                                             enum flow_block_command cmd,
                                             struct nf_flowtable *flowtable,
                                             struct netlink_ext_ack *extack)
{
        memset(bo, 0, sizeof(*bo));
        bo->net         = net;
        bo->block       = &flowtable->flow_block;
        bo->command     = cmd;
        bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        bo->extack      = extack;
        bo->cb_list_head = &flowtable->flow_block.cb_list;
        INIT_LIST_HEAD(&bo->cb_list);
}

static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
{
        struct nf_flowtable *flowtable = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;

        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
        list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
}

static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
                                          struct nf_flowtable *flowtable,
                                          struct net_device *dev,
                                          enum flow_block_command cmd,
                                          struct netlink_ext_ack *extack)
{
        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);

        return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
                                           nf_flow_table_indr_cleanup);
}

static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
                                     struct nf_flowtable *flowtable,
                                     struct net_device *dev,
                                     enum flow_block_command cmd,
                                     struct netlink_ext_ack *extack)
{
        int err;

        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);
        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
        if (err < 0)
                return err;

        return 0;
}

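/* Bind or unbind a net device's flow block to the flowtable. Devices with
 * ndo_setup_tc get a direct TC_SETUP_FT call; everything else goes through
 * the indirect block infrastructure, typically used for tunnel netdevices
 * whose offload is implemented by another driver.
 */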
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
                                struct net_device *dev,
                                enum flow_block_command cmd)
{
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo;
        int err;

        if (!nf_flowtable_hw_offload(flowtable))
                return 0;

        if (dev->netdev_ops->ndo_setup_tc)
                err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
                                                &extack);
        else
                err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
                                                     &extack);
        if (err < 0)
                return err;

        return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);

int nf_flow_table_offload_init(void)
{
        nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add",
                                                 WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_add_wq)
                return -ENOMEM;

        nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del",
                                                 WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_del_wq)
                goto err_del_wq;

        nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats",
                                                   WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_stats_wq)
                goto err_stats_wq;

        return 0;

err_stats_wq:
        destroy_workqueue(nf_flow_offload_del_wq);
err_del_wq:
        destroy_workqueue(nf_flow_offload_add_wq);
        return -ENOMEM;
}

void nf_flow_table_offload_exit(void)
{
        destroy_workqueue(nf_flow_offload_add_wq);
        destroy_workqueue(nf_flow_offload_del_wq);
        destroy_workqueue(nf_flow_offload_stats_wq);
}