#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static struct workqueue_struct *nf_flow_offload_add_wq;
static struct workqueue_struct *nf_flow_offload_del_wq;
static struct workqueue_struct *nf_flow_offload_stats_wq;

struct flow_offload_work {
        struct list_head        list;
        enum flow_cls_command   cmd;
        struct nf_flowtable     *flowtable;
        struct flow_offload     *flow;
        struct work_struct      work;
};

#define NF_FLOW_DISSECTOR(__match, __type, __field)     \
        (__match)->dissector.offset[__type] =           \
                offsetof(struct nf_flow_key, __field)

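/*
 * Populate the tunnel portion of the match from the lwtunnel TX metadata
 * attached to the reply direction route: tunnel key id plus outer
 * addresses, with source and destination swapped because the match
 * describes packets received on the tunnel device.
 */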
static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
                                   struct ip_tunnel_info *tun_info)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        unsigned long long enc_keys;

        if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
                return;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
        key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
        mask->enc_key_id.keyid = 0xffffffff;
        enc_keys = BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
                   BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL);

        if (ip_tunnel_info_af(tun_info) == AF_INET) {
                NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                  enc_ipv4);
                key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
                key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
                if (key->enc_ipv4.src)
                        mask->enc_ipv4.src = 0xffffffff;
                if (key->enc_ipv4.dst)
                        mask->enc_ipv4.dst = 0xffffffff;
                enc_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        } else {
                memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
                       sizeof(struct in6_addr));
                memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
                       sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.src, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.src, 0xff,
                               sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.dst, 0xff,
                               sizeof(struct in6_addr));
                enc_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        }

        match->dissector.used_keys |= enc_keys;
}

static void nf_flow_rule_vlan_match(struct flow_dissector_key_vlan *key,
                                    struct flow_dissector_key_vlan *mask,
                                    u16 vlan_id, __be16 proto)
{
        key->vlan_id = vlan_id;
        mask->vlan_id = VLAN_VID_MASK;
        key->vlan_tpid = proto;
        mask->vlan_tpid = 0xffff;
}

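/*
 * Translate a flow tuple into a flow_dissector match: ingress interface,
 * optional VLAN/QinQ tags, L3 addresses, L4 protocol and ports. TCP flows
 * also match on the RST and FIN flags being clear, so packets carrying
 * these flags fall back to the software path.
 */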
static int nf_flow_rule_match(struct nf_flow_match *match,
                              const struct flow_offload_tuple *tuple,
                              struct dst_entry *other_dst)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        struct ip_tunnel_info *tun_info;
        bool vlan_encap = false;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

        if (other_dst && other_dst->lwtstate) {
                tun_info = lwt_tun_info(other_dst->lwtstate);
                nf_flow_rule_lwt_match(match, tun_info);
        }

        if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
                key->meta.ingress_ifindex = tuple->tc.iifidx;
        else
                key->meta.ingress_ifindex = tuple->iifidx;

        mask->meta.ingress_ifindex = 0xffffffff;

        if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
            tuple->encap[0].proto == htons(ETH_P_8021Q)) {
                NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN, vlan);
                nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
                                        tuple->encap[0].id,
                                        tuple->encap[0].proto);
                vlan_encap = true;
        }

        if (tuple->encap_num > 1 && !(tuple->in_vlan_ingress & BIT(1)) &&
            tuple->encap[1].proto == htons(ETH_P_8021Q)) {
                if (vlan_encap) {
                        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CVLAN,
                                          cvlan);
                        nf_flow_rule_vlan_match(&key->cvlan, &mask->cvlan,
                                                tuple->encap[1].id,
                                                tuple->encap[1].proto);
                } else {
                        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN,
                                          vlan);
                        nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
                                                tuple->encap[1].id,
                                                tuple->encap[1].proto);
                }
        }

        switch (tuple->l3proto) {
        case AF_INET:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                key->basic.n_proto = htons(ETH_P_IP);
                key->ipv4.src = tuple->src_v4.s_addr;
                mask->ipv4.src = 0xffffffff;
                key->ipv4.dst = tuple->dst_v4.s_addr;
                mask->ipv4.dst = 0xffffffff;
                break;
        case AF_INET6:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                key->basic.n_proto = htons(ETH_P_IPV6);
                key->ipv6.src = tuple->src_v6;
                memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
                key->ipv6.dst = tuple->dst_v6;
                memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
                break;
        default:
                return -EOPNOTSUPP;
        }
        mask->control.addr_type = 0xffff;
        match->dissector.used_keys |= BIT_ULL(key->control.addr_type);
        mask->basic.n_proto = 0xffff;

        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                key->tcp.flags = 0;
                mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
                match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
        case IPPROTO_GRE:
                break;
        default:
                return -EOPNOTSUPP;
        }

        key->basic.ip_proto = tuple->l4proto;
        mask->basic.ip_proto = 0xff;

        match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_META) |
                                      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
                                      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);

        switch (tuple->l4proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                key->tp.src = tuple->src_port;
                mask->tp.src = 0xffff;
                key->tp.dst = tuple->dst_port;
                mask->tp.dst = 0xffff;

                match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);
                break;
        }

        return 0;
}

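/* Fill in one 32-bit mangle action (header type, offset, value and mask). */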
static void flow_offload_mangle(struct flow_action_entry *entry,
                                enum flow_action_mangle_base htype, u32 offset,
                                const __be32 *value, const __be32 *mask)
{
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.offset = offset;
        memcpy(&entry->mangle.mask, mask, sizeof(u32));
        memcpy(&entry->mangle.val, value, sizeof(u32));
}

static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
        int i = flow_rule->rule->action.num_entries++;

        return &flow_rule->rule->action.entries[i];
}

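/*
 * Rewrite the Ethernet source address with two 32-bit mangles at offsets
 * 4 and 8 of the MAC header. The address comes from the cached tuple
 * (XMIT_DIRECT) or from the egress device, looked up via the reverse
 * tuple's ingress interface (XMIT_NEIGH).
 */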
static int flow_offload_eth_src(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        const struct flow_offload_tuple *other_tuple, *this_tuple;
        struct net_device *dev = NULL;
        const unsigned char *addr;
        u32 mask, val;
        u16 val16;

        this_tuple = &flow->tuplehash[dir].tuple;

        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                addr = this_tuple->out.h_source;
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                dev = dev_get_by_index(net, other_tuple->iifidx);
                if (!dev)
                        return -ENOENT;

                addr = dev->dev_addr;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mask = ~0xffff0000;
        memcpy(&val16, addr, 2);
        val = val16 << 16;
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        mask = ~0xffffffff;
        memcpy(&val, addr + 2, 4);
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
                            &val, &mask);

        dev_put(dev);

        return 0;
}

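/*
 * Rewrite the Ethernet destination address: either the next-hop address
 * cached in the tuple (XMIT_DIRECT) or the one resolved via a neighbour
 * lookup on the cached route; give up if the neighbour entry is not valid.
 */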
static int flow_offload_eth_dst(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        const struct flow_offload_tuple *other_tuple, *this_tuple;
        const struct dst_entry *dst_cache;
        unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        const void *daddr;
        u32 mask, val;
        u8 nud_state;
        u16 val16;

        this_tuple = &flow->tuplehash[dir].tuple;

        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                ether_addr_copy(ha, this_tuple->out.h_dest);
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                daddr = &other_tuple->src_v4;
                dst_cache = this_tuple->dst_cache;
                n = dst_neigh_lookup(dst_cache, daddr);
                if (!n)
                        return -ENOENT;

                read_lock_bh(&n->lock);
                nud_state = n->nud_state;
                ether_addr_copy(ha, n->ha);
                read_unlock_bh(&n->lock);
                neigh_release(n);

                if (!(nud_state & NUD_VALID))
                        return -ENOENT;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mask = ~0xffffffff;
        memcpy(&val, ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                            &val, &mask);

        mask = ~0x0000ffff;
        memcpy(&val16, ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        return 0;
}

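/*
 * IPv4 NAT mangles. The translated address always comes from the reverse
 * tuple; whether saddr or daddr is rewritten depends on the direction
 * being offloaded.
 */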
static void flow_offload_ipv4_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

static void flow_offload_ipv4_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

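/*
 * IPv6 counterparts: a 128-bit address rewrite is expressed as four
 * consecutive 32-bit mangle actions.
 */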
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     unsigned int offset,
                                     const __be32 *addr, const __be32 *mask)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
                                    offset + i * sizeof(u32), &addr[i], mask);
        }
}

static void flow_offload_ipv6_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static void flow_offload_ipv6_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static int flow_offload_l4proto(const struct flow_offload *flow)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        u8 type = 0;

        switch (protonum) {
        case IPPROTO_TCP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
                break;
        case IPPROTO_UDP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
                break;
        default:
                break;
        }

        return type;
}

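/*
 * Layer 4 port NAT. Ports are mangled as 32-bit words at offset 0 of the
 * transport header, so the 16-bit port is placed in the upper or lower
 * half of the word depending on whether the source or destination port
 * is rewritten.
 */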
static void flow_offload_port_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_port_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_ipv4_checksum(struct net *net,
                                       const struct flow_offload *flow,
                                       struct nf_flow_rule *flow_rule)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

        entry->id = FLOW_ACTION_CSUM;
        entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

        switch (protonum) {
        case IPPROTO_TCP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
                break;
        case IPPROTO_UDP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
                break;
        }
}

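/*
 * Append a redirect action towards the egress interface. The device
 * reference taken here is released by __nf_flow_offload_destroy().
 */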
static void flow_offload_redirect(struct net *net,
                                  const struct flow_offload *flow,
                                  enum flow_offload_tuple_dir dir,
                                  struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *this_tuple, *other_tuple;
        struct flow_action_entry *entry;
        struct net_device *dev;
        int ifindex;

        this_tuple = &flow->tuplehash[dir].tuple;
        switch (this_tuple->xmit_type) {
        case FLOW_OFFLOAD_XMIT_DIRECT:
                this_tuple = &flow->tuplehash[dir].tuple;
                ifindex = this_tuple->out.hw_ifidx;
                break;
        case FLOW_OFFLOAD_XMIT_NEIGH:
                other_tuple = &flow->tuplehash[!dir].tuple;
                ifindex = other_tuple->iifidx;
                break;
        default:
                return;
        }

        dev = dev_get_by_index(net, ifindex);
        if (!dev)
                return;

        entry = flow_action_entry_next(flow_rule);
        entry->id = FLOW_ACTION_REDIRECT;
        entry->dev = dev;
}

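/*
 * Tunnel actions: encapsulate based on this direction's route metadata,
 * decapsulate based on the reverse direction's route metadata.
 */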
static void flow_offload_encap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *this_tuple;
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        this_tuple = &flow->tuplehash[dir].tuple;
        if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
                return;

        dst = this_tuple->dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_ENCAP;
                        entry->tunnel = tun_info;
                }
        }
}

static void flow_offload_decap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *other_tuple;
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        other_tuple = &flow->tuplehash[!dir].tuple;
        if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
                return;

        dst = other_tuple->dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
                }
        }
}

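/*
 * Actions shared by the IPv4 and IPv6 rule builders: tunnel decap/encap,
 * Ethernet address rewrite, popping the VLAN tags seen on ingress and
 * pushing the VLAN/PPPoE encapsulation used by the reverse direction.
 */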
static int
nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
                          enum flow_offload_tuple_dir dir,
                          struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *other_tuple;
        const struct flow_offload_tuple *tuple;
        int i;

        flow_offload_decap_tunnel(flow, dir, flow_rule);
        flow_offload_encap_tunnel(flow, dir, flow_rule);

        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        tuple = &flow->tuplehash[dir].tuple;

        for (i = 0; i < tuple->encap_num; i++) {
                struct flow_action_entry *entry;

                if (tuple->in_vlan_ingress & BIT(i))
                        continue;

                if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_VLAN_POP;
                }
        }

        other_tuple = &flow->tuplehash[!dir].tuple;

        for (i = 0; i < other_tuple->encap_num; i++) {
                struct flow_action_entry *entry;

                if (other_tuple->in_vlan_ingress & BIT(i))
                        continue;

                entry = flow_action_entry_next(flow_rule);

                switch (other_tuple->encap[i].proto) {
                case htons(ETH_P_PPP_SES):
                        entry->id = FLOW_ACTION_PPPOE_PUSH;
                        entry->pppoe.sid = other_tuple->encap[i].id;
                        break;
                case htons(ETH_P_8021Q):
                        entry->id = FLOW_ACTION_VLAN_PUSH;
                        entry->vlan.vid = other_tuple->encap[i].id;
                        entry->vlan.proto = other_tuple->encap[i].proto;
                        break;
                }
        }

        return 0;
}

int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv4_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
            test_bit(NF_FLOW_DNAT, &flow->flags))
                flow_offload_ipv4_checksum(net, flow, flow_rule);

        flow_offload_redirect(net, flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);

int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv6_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }

        flow_offload_redirect(net, flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);

#define NF_FLOW_RULE_ACTION_MAX 16

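/*
 * Build the match and action list for one direction of the flow. The
 * flowtable type's ->action() callback (e.g. nf_flow_rule_route_ipv4)
 * fills in the actions.
 */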
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
                           const struct flow_offload_work *offload,
                           enum flow_offload_tuple_dir dir)
{
        const struct nf_flowtable *flowtable = offload->flowtable;
        const struct flow_offload_tuple *tuple, *other_tuple;
        struct flow_offload *flow = offload->flow;
        struct dst_entry *other_dst = NULL;
        struct nf_flow_rule *flow_rule;
        int err = -ENOMEM;

        flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
        if (!flow_rule)
                goto err_flow;

        flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
        if (!flow_rule->rule)
                goto err_flow_rule;

        flow_rule->rule->match.dissector = &flow_rule->match.dissector;
        flow_rule->rule->match.mask = &flow_rule->match.mask;
        flow_rule->rule->match.key = &flow_rule->match.key;

        tuple = &flow->tuplehash[dir].tuple;
        other_tuple = &flow->tuplehash[!dir].tuple;
        if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
                other_dst = other_tuple->dst_cache;

        err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
        if (err < 0)
                goto err_flow_match;

        flow_rule->rule->action.num_entries = 0;
        if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
                goto err_flow_match;

        return flow_rule;

err_flow_match:
        kfree(flow_rule->rule);
err_flow_rule:
        kfree(flow_rule);
err_flow:
        return NULL;
}

static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
                entry = &flow_rule->rule->action.entries[i];
                if (entry->id != FLOW_ACTION_REDIRECT)
                        continue;

                dev_put(entry->dev);
        }
        kfree(flow_rule->rule);
        kfree(flow_rule);
}

static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
        int i;

        for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
                __nf_flow_offload_destroy(flow_rule[i]);
}

static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        struct net *net = read_pnet(&offload->flowtable->net);

        flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_ORIGINAL);
        if (!flow_rule[0])
                return -ENOMEM;

        flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_REPLY);
        if (!flow_rule[1]) {
                __nf_flow_offload_destroy(flow_rule[0]);
                return -ENOMEM;
        }

        return 0;
}

static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
                                 __be16 proto, int priority,
                                 enum flow_cls_command cmd,
                                 const struct flow_offload_tuple *tuple,
                                 struct netlink_ext_ack *extack)
{
        cls_flow->common.protocol = proto;
        cls_flow->common.prio = priority;
        cls_flow->common.extack = extack;
        cls_flow->command = cmd;
        cls_flow->cookie = (unsigned long)tuple;
}

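/*
 * Run a classifier command against every callback registered on the
 * flowtable block. Returns the number of callbacks that accepted the
 * command; stats are copied back for FLOW_CLS_STATS.
 */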
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
                                 struct flow_offload *flow,
                                 struct nf_flow_rule *flow_rule,
                                 enum flow_offload_tuple_dir dir,
                                 int priority, int cmd,
                                 struct flow_stats *stats,
                                 struct list_head *block_cb_list)
{
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;
        int err, i = 0;

        nf_flow_offload_init(&cls_flow, proto, priority, cmd,
                             &flow->tuplehash[dir].tuple, &extack);
        if (cmd == FLOW_CLS_REPLACE)
                cls_flow.rule = flow_rule->rule;

        down_read(&flowtable->flow_block_lock);
        list_for_each_entry(block_cb, block_cb_list, list) {
                err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
                                   block_cb->cb_priv);
                if (err < 0)
                        continue;

                i++;
        }
        up_read(&flowtable->flow_block_lock);

        if (cmd == FLOW_CLS_STATS)
                memcpy(stats, &cls_flow.stats, sizeof(*stats));

        return i;
}

static int flow_offload_tuple_add(struct flow_offload_work *offload,
                                  struct nf_flow_rule *flow_rule,
                                  enum flow_offload_tuple_dir dir)
{
        return nf_flow_offload_tuple(offload->flowtable, offload->flow,
                                     flow_rule, dir,
                                     offload->flowtable->priority,
                                     FLOW_CLS_REPLACE, NULL,
                                     &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
                                   enum flow_offload_tuple_dir dir)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->flowtable->priority,
                              FLOW_CLS_DESTROY, NULL,
                              &offload->flowtable->flow_block.cb_list);
}

static int flow_offload_rule_add(struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        int ok_count = 0;

        ok_count += flow_offload_tuple_add(offload, flow_rule[0],
                                           FLOW_OFFLOAD_DIR_ORIGINAL);
        if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
                ok_count += flow_offload_tuple_add(offload, flow_rule[1],
                                                   FLOW_OFFLOAD_DIR_REPLY);
        if (ok_count == 0)
                return -ENOENT;

        return 0;
}

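/*
 * Workqueue handlers: install, remove or query the hardware counters of
 * one flow. IPS_HW_OFFLOAD is only set once at least one callback has
 * accepted the rule.
 */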
static void flow_offload_work_add(struct flow_offload_work *offload)
{
        struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
        int err;

        err = nf_flow_offload_alloc(offload, flow_rule);
        if (err < 0)
                return;

        err = flow_offload_rule_add(offload, flow_rule);
        if (err < 0)
                goto out;

        set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);

out:
        nf_flow_offload_destroy(flow_rule);
}

static void flow_offload_work_del(struct flow_offload_work *offload)
{
        clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
        if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
                flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
        set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}

static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                                     enum flow_offload_tuple_dir dir,
                                     struct flow_stats *stats)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->flowtable->priority,
                              FLOW_CLS_STATS, stats,
                              &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_work_stats(struct flow_offload_work *offload)
{
        struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
        u64 lastused;

        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
        if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
                flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY,
                                         &stats[1]);

        lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
        offload->flow->timeout = max_t(u64, offload->flow->timeout,
                                       lastused + flow_offload_get_timeout(offload->flow));

        if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
                if (stats[0].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_ORIGINAL,
                                       stats[0].pkts, stats[0].bytes);
                if (stats[1].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_REPLY,
                                       stats[1].pkts, stats[1].bytes);
        }
}

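/*
 * Dispatch one queued request, drop the per-netns pending counter and the
 * NF_FLOW_HW_PENDING bit, then free the work item.
 */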
static void flow_offload_work_handler(struct work_struct *work)
{
        struct flow_offload_work *offload;
        struct net *net;

        offload = container_of(work, struct flow_offload_work, work);
        net = read_pnet(&offload->flowtable->net);
        switch (offload->cmd) {
        case FLOW_CLS_REPLACE:
                flow_offload_work_add(offload);
                NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
                break;
        case FLOW_CLS_DESTROY:
                flow_offload_work_del(offload);
                NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
                break;
        case FLOW_CLS_STATS:
                flow_offload_work_stats(offload);
                NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
                break;
        default:
                WARN_ON_ONCE(1);
        }

        clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
        kfree(offload);
}

static void flow_offload_queue_work(struct flow_offload_work *offload)
{
        struct net *net = read_pnet(&offload->flowtable->net);

        if (offload->cmd == FLOW_CLS_REPLACE) {
                NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
                queue_work(nf_flow_offload_add_wq, &offload->work);
        } else if (offload->cmd == FLOW_CLS_DESTROY) {
                NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
                queue_work(nf_flow_offload_del_wq, &offload->work);
        } else {
                NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
                queue_work(nf_flow_offload_stats_wq, &offload->work);
        }
}

static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
                           struct flow_offload *flow, unsigned int cmd)
{
        struct flow_offload_work *offload;

        if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
                return NULL;

        offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload) {
                clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
                return NULL;
        }

        offload->cmd = cmd;
        offload->flow = flow;
        offload->flowtable = flowtable;
        INIT_WORK(&offload->work, flow_offload_work_handler);

        return offload;
}

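/*
 * Entry points called from the flowtable core. Each request is allocated
 * with NF_FLOW_HW_PENDING set and queued on its own workqueue; a stats
 * refresh is skipped while most of the flow timeout still remains.
 */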
void nf_flow_offload_add(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

void nf_flow_offload_del(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
        if (!offload)
                return;

        set_bit(NF_FLOW_HW_DYING, &flow->flags);
        flow_offload_queue_work(offload);
}

void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
{
        struct flow_offload_work *offload;
        __s32 delta;

        delta = nf_flow_timeout_delta(flow->timeout);
        if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
                return;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

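/* Flush pending offload work before garbage collecting or tearing down. */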
void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
{
        if (nf_flowtable_hw_offload(flowtable)) {
                flush_workqueue(nf_flow_offload_del_wq);
                nf_flow_table_gc_run(flowtable);
        }
}

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
        if (nf_flowtable_hw_offload(flowtable)) {
                flush_workqueue(nf_flow_offload_add_wq);
                flush_workqueue(nf_flow_offload_del_wq);
                flush_workqueue(nf_flow_offload_stats_wq);
        }
}

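/*
 * Bind or unbind the driver callbacks collected in the flow_block_offload
 * to the flowtable's own callback list.
 */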
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
                                     struct flow_block_offload *bo,
                                     enum flow_block_command cmd)
{
        struct flow_block_cb *block_cb, *next;
        int err = 0;

        down_write(&flowtable->flow_block_lock);
        switch (cmd) {
        case FLOW_BLOCK_BIND:
                list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
                break;
        case FLOW_BLOCK_UNBIND:
                list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                        list_del(&block_cb->list);
                        flow_block_cb_free(block_cb);
                }
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }
        up_write(&flowtable->flow_block_lock);

        return err;
}

static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
                                             struct net *net,
                                             enum flow_block_command cmd,
                                             struct nf_flowtable *flowtable,
                                             struct netlink_ext_ack *extack)
{
        memset(bo, 0, sizeof(*bo));
        bo->net         = net;
        bo->block       = &flowtable->flow_block;
        bo->command     = cmd;
        bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        bo->extack      = extack;
        bo->cb_list_head = &flowtable->flow_block.cb_list;
        INIT_LIST_HEAD(&bo->cb_list);
}

static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
{
        struct nf_flowtable *flowtable = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;

        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
        list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
}

static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
                                          struct nf_flowtable *flowtable,
                                          struct net_device *dev,
                                          enum flow_block_command cmd,
                                          struct netlink_ext_ack *extack)
{
        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);

        return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
                                           nf_flow_table_indr_cleanup);
}

static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
                                     struct nf_flowtable *flowtable,
                                     struct net_device *dev,
                                     enum flow_block_command cmd,
                                     struct netlink_ext_ack *extack)
{
        int err;

        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);
        down_write(&flowtable->flow_block_lock);
        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
        up_write(&flowtable->flow_block_lock);
        if (err < 0)
                return err;

        return 0;
}

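/*
 * Set up or tear down the offload block for one device, preferring
 * ndo_setup_tc and falling back to indirect block registration.
 */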
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
                                struct net_device *dev,
                                enum flow_block_command cmd)
{
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo;
        int err;

        if (!nf_flowtable_hw_offload(flowtable))
                return 0;

        if (dev->netdev_ops->ndo_setup_tc)
                err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
                                                &extack);
        else
                err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
                                                     &extack);
        if (err < 0)
                return err;

        return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);

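/* Create and destroy the three dedicated offload workqueues. */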
int nf_flow_table_offload_init(void)
{
        nf_flow_offload_add_wq  = alloc_workqueue("nf_ft_offload_add",
                                                  WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_add_wq)
                return -ENOMEM;

        nf_flow_offload_del_wq  = alloc_workqueue("nf_ft_offload_del",
                                                  WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_del_wq)
                goto err_del_wq;

        nf_flow_offload_stats_wq  = alloc_workqueue("nf_ft_offload_stats",
                                                    WQ_UNBOUND | WQ_SYSFS, 0);
        if (!nf_flow_offload_stats_wq)
                goto err_stats_wq;

        return 0;

err_stats_wq:
        destroy_workqueue(nf_flow_offload_del_wq);
err_del_wq:
        destroy_workqueue(nf_flow_offload_add_wq);
        return -ENOMEM;
}

void nf_flow_table_offload_exit(void)
{
        destroy_workqueue(nf_flow_offload_add_wq);
        destroy_workqueue(nf_flow_offload_del_wq);
        destroy_workqueue(nf_flow_offload_stats_wq);
}