#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
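
/*
 * Hardware offload for the netfilter flow table: each offloaded flow is
 * translated into a struct flow_rule (match plus action list) and handed
 * to the drivers bound to the flowtable through flow_block callbacks.
 * All driver interaction runs from a work handler fed by a pending list.
 */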
static struct work_struct nf_flow_offload_work;
static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
static LIST_HEAD(flow_offload_pending_list);
struct flow_offload_work {
	struct list_head	list;
	enum flow_cls_command	cmd;
	int			priority;
	struct nf_flowtable	*flowtable;
	struct flow_offload	*flow;
};
struct nf_flow_key {
	struct flow_dissector_key_meta		meta;
	struct flow_dissector_key_control	control;
	struct flow_dissector_key_basic		basic;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_tcp		tcp;
	struct flow_dissector_key_ports		tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};
#define NF_FLOW_DISSECTOR(__match, __type, __field)	\
	(__match)->dissector.offset[__type] =		\
		offsetof(struct nf_flow_key, __field)
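
/*
 * Populate the flow dissector key/mask pair from one direction of the
 * offloaded flow tuple: ingress interface, L3 addresses, L4 protocol and
 * ports. Address families and transport protocols that cannot be
 * expressed as a flow_rule match are rejected with -EOPNOTSUPP.
 */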
static int nf_flow_rule_match(struct nf_flow_match *match,
			      const struct flow_offload_tuple *tuple)
{
	struct nf_flow_key *mask = &match->mask;
	struct nf_flow_key *key = &match->key;

	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

	key->meta.ingress_ifindex = tuple->iifidx;
	mask->meta.ingress_ifindex = 0xffffffff;

	switch (tuple->l3proto) {
	case AF_INET:
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		key->basic.n_proto = htons(ETH_P_IP);
		key->ipv4.src = tuple->src_v4.s_addr;
		mask->ipv4.src = 0xffffffff;
		key->ipv4.dst = tuple->dst_v4.s_addr;
		mask->ipv4.dst = 0xffffffff;
		break;
	case AF_INET6:
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		key->basic.n_proto = htons(ETH_P_IPV6);
		key->ipv6.src = tuple->src_v6;
		memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
		key->ipv6.dst = tuple->dst_v6;
		memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
		break;
	default:
		return -EOPNOTSUPP;
	}
	match->dissector.used_keys |= BIT(key->control.addr_type);
	mask->basic.n_proto = 0xffff;

	switch (tuple->l4proto) {
	case IPPROTO_TCP:
		key->tcp.flags = 0;
		mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
		break;
	case IPPROTO_UDP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	key->basic.ip_proto = tuple->l4proto;
	mask->basic.ip_proto = 0xff;

	key->tp.src = tuple->src_port;
	mask->tp.src = 0xffff;
	key->tp.dst = tuple->dst_port;
	mask->tp.dst = 0xffff;

	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
				      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
				      BIT(FLOW_DISSECTOR_KEY_BASIC) |
				      BIT(FLOW_DISSECTOR_KEY_PORTS);
	return 0;
}
static void flow_offload_mangle(struct flow_action_entry *entry,
				enum flow_action_mangle_base htype, u32 offset,
				const __be32 *value, const __be32 *mask)
{
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.offset = offset;
	memcpy(&entry->mangle.mask, mask, sizeof(u32));
	memcpy(&entry->mangle.val, value, sizeof(u32));
}
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
	int i = flow_rule->rule->action.num_entries++;

	return &flow_rule->rule->action.entries[i];
}
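
/*
 * Ethernet header rewrite: mangle actions carry 32-bit value/mask pairs,
 * so each 6-byte MAC address is rewritten as two mangle actions covering
 * the relevant words of the Ethernet header.
 */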
static int flow_offload_eth_src(struct net *net,
				const struct flow_offload *flow,
				enum flow_offload_tuple_dir dir,
				struct nf_flow_rule *flow_rule)
{
	const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
	struct net_device *dev;
	u32 mask, val;
	u16 val16;

	dev = dev_get_by_index(net, tuple->iifidx);
	if (!dev)
		return -ENOENT;

	mask = ~0xffff0000;
	memcpy(&val16, dev->dev_addr, 2);
	val = val16 << 16;
	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);

	mask = ~0xffffffff;
	memcpy(&val, dev->dev_addr + 2, 4);
	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
			    &val, &mask);
	dev_put(dev);

	return 0;
}
static int flow_offload_eth_dst(struct net *net,
				const struct flow_offload *flow,
				enum flow_offload_tuple_dir dir,
				struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
	const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
	const struct dst_entry *dst_cache;
	unsigned char ha[ETH_ALEN];
	struct neighbour *n;
	u32 mask, val;
	u8 nud_state;
	u16 val16;

	dst_cache = flow->tuplehash[dir].tuple.dst_cache;
	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -ENOENT;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);

	if (!(nud_state & NUD_VALID)) {
		neigh_release(n);
		return -ENOENT;
	}

	mask = ~0xffffffff;
	memcpy(&val, ha, 4);
	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
			    &val, &mask);

	mask = ~0x0000ffff;
	memcpy(&val16, ha + 4, 2);
	val = val16;
	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);
	neigh_release(n);

	return 0;
}
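
/*
 * NAT address rewrite: the translated address is taken from the tuple of
 * the opposite direction, and the flow direction decides whether the
 * source or the destination field of the IP header is mangled.
 */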
static void flow_offload_ipv4_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask = ~htonl(0xffffffff);
	__be32 addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		offset = offsetof(struct iphdr, saddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		offset = offsetof(struct iphdr, daddr);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
			    &addr, &mask);
}
static void flow_offload_ipv4_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask = ~htonl(0xffffffff);
	__be32 addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		offset = offsetof(struct iphdr, daddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		offset = offsetof(struct iphdr, saddr);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
			    &addr, &mask);
}
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
				     unsigned int offset,
				     const __be32 *addr, const __be32 *mask)
{
	struct flow_action_entry *entry;
	int i;

	/* Rewrite the 128-bit address as four 32-bit mangle actions. */
	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
		entry = flow_action_entry_next(flow_rule);
		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
				    offset + i * sizeof(u32), &addr[i], mask);
	}
}
static void flow_offload_ipv6_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	u32 mask = ~htonl(0xffffffff);
	const __be32 *addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, saddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, daddr);
		break;
	default:
		return;
	}

	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static void flow_offload_ipv6_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	u32 mask = ~htonl(0xffffffff);
	const __be32 *addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, daddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, saddr);
		break;
	default:
		return;
	}

	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}
static int flow_offload_l4proto(const struct flow_offload *flow)
{
	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
	u8 type = 0;

	switch (protonum) {
	case IPPROTO_TCP:
		type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
		break;
	case IPPROTO_UDP:
		type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
		break;
	default:
		break;
	}

	return type;
}
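
/*
 * NAT port rewrite: the L4 ports are mangled as one 32-bit word at the
 * start of the transport header, so the source port occupies the upper
 * 16 bits of the word (in network byte order) and the destination port
 * the lower 16 bits; the mask restricts the rewrite to the chosen half.
 */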
static void flow_offload_port_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask, port;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
		offset = 0; /* offsetof(struct tcphdr, source); */
		port = htonl(port << 16);
		mask = ~htonl(0xffff0000);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
		offset = 0; /* offsetof(struct tcphdr, dest); */
		port = htonl(port);
		mask = ~htonl(0xffff);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
			    &port, &mask);
}
static void flow_offload_port_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask, port;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
		offset = 0; /* offsetof(struct tcphdr, dest); */
		port = htonl(port);
		mask = ~htonl(0xffff);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
		offset = 0; /* offsetof(struct tcphdr, source); */
		port = htonl(port << 16);
		mask = ~htonl(0xffff0000);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
			    &port, &mask);
}
static void flow_offload_ipv4_checksum(struct net *net,
				       const struct flow_offload *flow,
				       struct nf_flow_rule *flow_rule)
{
	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

	entry->id = FLOW_ACTION_CSUM;
	entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

	switch (protonum) {
	case IPPROTO_TCP:
		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
		break;
	case IPPROTO_UDP:
		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	}
}
static void flow_offload_redirect(const struct flow_offload *flow,
				  enum flow_offload_tuple_dir dir,
				  struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	struct rtable *rt;

	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	entry->id = FLOW_ACTION_REDIRECT;
	entry->dev = rt->dst.dev;
	dev_hold(rt->dst.dev);
}
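
/*
 * Flowtable ->action() callbacks for the routed IPv4 and IPv6 cases:
 * build the complete action list for one direction of the flow, i.e.
 * MAC rewrite, optional NAT mangles plus checksum update, and the final
 * redirect to the output device.
 */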
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule)
{
	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}
	if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
	    test_bit(NF_FLOW_DNAT, &flow->flags))
		flow_offload_ipv4_checksum(net, flow, flow_rule);

	flow_offload_redirect(flow, dir, flow_rule);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule)
{
	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}

	flow_offload_redirect(flow, dir, flow_rule);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
#define NF_FLOW_RULE_ACTION_MAX	16
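
/*
 * Allocate and fill a nf_flow_rule for one direction of the flow: set up
 * the match from the tuple and let the flowtable type build the actions.
 */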
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
			   const struct flow_offload_work *offload,
			   enum flow_offload_tuple_dir dir)
{
	const struct nf_flowtable *flowtable = offload->flowtable;
	const struct flow_offload *flow = offload->flow;
	const struct flow_offload_tuple *tuple;
	struct nf_flow_rule *flow_rule;
	int err;

	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
	if (!flow_rule)
		goto err_flow;

	flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
	if (!flow_rule->rule)
		goto err_flow_rule;

	flow_rule->rule->match.dissector = &flow_rule->match.dissector;
	flow_rule->rule->match.mask = &flow_rule->match.mask;
	flow_rule->rule->match.key = &flow_rule->match.key;

	tuple = &flow->tuplehash[dir].tuple;
	err = nf_flow_rule_match(&flow_rule->match, tuple);
	if (err < 0)
		goto err_flow_match;

	flow_rule->rule->action.num_entries = 0;
	if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
		goto err_flow_match;

	return flow_rule;

err_flow_match:
	kfree(flow_rule->rule);
err_flow_rule:
	kfree(flow_rule);
err_flow:
	return NULL;
}
static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry;
	int i;

	for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
		entry = &flow_rule->rule->action.entries[i];
		if (entry->id != FLOW_ACTION_REDIRECT)
			continue;

		dev_put(entry->dev);
	}
	kfree(flow_rule->rule);
	kfree(flow_rule);
}

static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
	int i;

	for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
		__nf_flow_offload_destroy(flow_rule[i]);
}
static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
				 struct nf_flow_rule *flow_rule[])
{
	struct net *net = read_pnet(&offload->flowtable->net);

	flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
						  FLOW_OFFLOAD_DIR_ORIGINAL);
	if (!flow_rule[0])
		return -ENOMEM;

	flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
						  FLOW_OFFLOAD_DIR_REPLY);
	if (!flow_rule[1]) {
		__nf_flow_offload_destroy(flow_rule[0]);
		return -ENOMEM;
	}

	return 0;
}
static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
				 __be16 proto, int priority,
				 enum flow_cls_command cmd,
				 const struct flow_offload_tuple *tuple,
				 struct netlink_ext_ack *extack)
{
	cls_flow->common.protocol = proto;
	cls_flow->common.prio = priority;
	cls_flow->common.extack = extack;
	cls_flow->command = cmd;
	cls_flow->cookie = (unsigned long)tuple;
}
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
				 struct flow_offload *flow,
				 struct nf_flow_rule *flow_rule,
				 enum flow_offload_tuple_dir dir,
				 int priority, int cmd,
				 struct list_head *block_cb_list)
{
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;
	int err, i = 0;

	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
			     &flow->tuplehash[dir].tuple, &extack);
	if (cmd == FLOW_CLS_REPLACE)
		cls_flow.rule = flow_rule->rule;

	list_for_each_entry(block_cb, block_cb_list, list) {
		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
				   block_cb->cb_priv);
		if (err < 0)
			continue;

		i++;
	}

	return i;
}
static int flow_offload_tuple_add(struct flow_offload_work *offload,
				  struct nf_flow_rule *flow_rule,
				  enum flow_offload_tuple_dir dir)
{
	return nf_flow_offload_tuple(offload->flowtable, offload->flow,
				     flow_rule, dir, offload->priority,
				     FLOW_CLS_REPLACE,
				     &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
				   enum flow_offload_tuple_dir dir)
{
	nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
			      offload->priority, FLOW_CLS_DESTROY,
			      &offload->flowtable->flow_block.cb_list);
}
static int flow_offload_rule_add(struct flow_offload_work *offload,
				 struct nf_flow_rule *flow_rule[])
{
	int ok_count = 0;

	ok_count += flow_offload_tuple_add(offload, flow_rule[0],
					   FLOW_OFFLOAD_DIR_ORIGINAL);
	ok_count += flow_offload_tuple_add(offload, flow_rule[1],
					   FLOW_OFFLOAD_DIR_REPLY);
	if (ok_count == 0)
		return -ENOENT;

	return 0;
}
static void flow_offload_work_add(struct flow_offload_work *offload)
{
	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
	int err;

	err = nf_flow_offload_alloc(offload, flow_rule);
	if (err < 0)
		return;

	err = flow_offload_rule_add(offload, flow_rule);
	if (err < 0)
		set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);

	nf_flow_offload_destroy(flow_rule);
}
static void flow_offload_work_del(struct flow_offload_work *offload)
{
	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
static void flow_offload_tuple_stats(struct flow_offload_work *offload,
				     enum flow_offload_tuple_dir dir,
				     struct flow_stats *stats)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_STATS,
			     &offload->flow->tuplehash[dir].tuple, &extack);

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
		block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);

	memcpy(stats, &cls_flow.stats, sizeof(*stats));
}
static void flow_offload_work_stats(struct flow_offload_work *offload)
{
	struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
	u64 lastused;

	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);

	lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
	offload->flow->timeout = max_t(u64, offload->flow->timeout,
				       lastused + NF_FLOW_TIMEOUT);
}
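
/*
 * All hardware offload requests (add, delete, stats refresh) are queued
 * on flow_offload_pending_list and processed from this work handler,
 * which drains the list under the spinlock and invokes the per-command
 * helpers above.
 */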
static void flow_offload_work_handler(struct work_struct *work)
{
	struct flow_offload_work *offload, *next;
	LIST_HEAD(offload_pending_list);

	spin_lock_bh(&flow_offload_pending_list_lock);
	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
	spin_unlock_bh(&flow_offload_pending_list_lock);

	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
		switch (offload->cmd) {
		case FLOW_CLS_REPLACE:
			flow_offload_work_add(offload);
			break;
		case FLOW_CLS_DESTROY:
			flow_offload_work_del(offload);
			break;
		case FLOW_CLS_STATS:
			flow_offload_work_stats(offload);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		list_del(&offload->list);
		kfree(offload);
	}
}
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
	spin_lock_bh(&flow_offload_pending_list_lock);
	list_add_tail(&offload->list, &flow_offload_pending_list);
	spin_unlock_bh(&flow_offload_pending_list_lock);

	schedule_work(&nf_flow_offload_work);
}
static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
			   struct flow_offload *flow, unsigned int cmd)
{
	struct flow_offload_work *offload;

	offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return NULL;

	offload->cmd = cmd;
	offload->flow = flow;
	offload->priority = flowtable->priority;
	offload->flowtable = flowtable;

	return offload;
}
void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
{
	struct flow_offload_work *offload;

	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
	if (!offload)
		return;

	flow_offload_queue_work(offload);
}
void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
{
	struct flow_offload_work *offload;

	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
	if (!offload)
		return;

	set_bit(NF_FLOW_HW_DYING, &flow->flags);
	flow_offload_queue_work(offload);
}
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow)
{
	struct flow_offload_work *offload;
	__s32 delta;

	delta = nf_flow_timeout_delta(flow->timeout);
	if (delta >= (9 * NF_FLOW_TIMEOUT) / 10)
		return;

	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
	if (!offload)
		return;

	flow_offload_queue_work(offload);
}
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
	if (nf_flowtable_hw_offload(flowtable))
		flush_work(&nf_flow_offload_work);
}
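
/*
 * flow_block bind/unbind: drivers return their flow_block_cb entries from
 * ndo_setup_tc(TC_SETUP_FT); on bind they are spliced onto the flowtable's
 * callback list, on unbind they are removed and released.
 */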
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
				     struct flow_block_offload *bo,
				     enum flow_block_command cmd)
{
	struct flow_block_cb *block_cb, *next;
	int err = 0;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
			list_del(&block_cb->list);
			flow_block_cb_free(block_cb);
		}
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
				     struct nf_flowtable *flowtable,
				     struct net_device *dev,
				     enum flow_block_command cmd,
				     struct netlink_ext_ack *extack)
{
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	memset(bo, 0, sizeof(*bo));
	bo->net = dev_net(dev);
	bo->block = &flowtable->flow_block;
	bo->command = cmd;
	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
	if (err < 0)
		return err;

	return 0;
}
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	if (!nf_flowtable_hw_offload(flowtable))
		return 0;

	err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
	if (err < 0)
		return err;

	return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
int nf_flow_table_offload_init(void)
{
	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);

	return 0;
}

void nf_flow_table_offload_exit(void)
{
	struct flow_offload_work *offload, *next;
	LIST_HEAD(offload_pending_list);

	cancel_work_sync(&nf_flow_offload_work);

	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
		list_del(&offload->list);
		kfree(offload);
	}
}