// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
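
/* Populate one direction of the flow tuple from the conntrack tuple.
 * The union layout makes ->u.tcp.port valid for UDP as well.
 */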
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}
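
/* Allocate a flow entry for a conntrack entry. A reference on @ct is
 * taken up front and dropped again on the error path.
 */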
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);
	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
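
/* IPv6 dst entries carry a cookie that changes on FIB updates; cache it
 * so that dst_check() can detect stale routes later. IPv4 uses cookie 0.
 */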
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;
	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
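
/* Once a flow leaves the flowtable, conntrack resumes tracking
 * mid-stream: force the TCP state back to established and clear the
 * tracked windows, since packets were not seen while offloaded.
 */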
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
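
/* Hash and compare only the lookup key, i.e. the tuple fields that come
 * before the __hash marker; everything after it is per-flow state.
 */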
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};
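
/* Insert both flow directions into the table; if the reply direction
 * cannot be inserted, back out the original direction again.
 */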
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	if (likely(!nf_flowtable_hw_offload(flow_table) ||
		   !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
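
/* Walk the flowtable, invoking @iter on each flow. -EAGAIN from a
 * concurrent rhashtable resize is not fatal; the walk simply continues.
 * Reply-direction entries are skipped so that each flow is visited
 * once, via its original-direction tuplehash.
 */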
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}

		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	struct dst_entry *dst;

	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		dst = tuple->dst_cache;
		if (!dst_check(dst, tuple->dst_cookie))
			return true;
	}

	return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}
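
/* GC step: flows that expired, lost their conntrack entry or whose
 * cached route went stale are moved into teardown state. Flows in
 * hardware are torn down in two passes (HW_DYING, then HW_DEAD) before
 * the software entry is released; live hardware flows get their stats
 * refreshed instead.
 */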
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
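
/* Rewrite the L4 checksum incrementally when mangling ports. A zero UDP
 * checksum means "none", so it is only updated when present, and
 * CSUM_MANGLED_0 prevents writing back an all-zero result.
 */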
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
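
/* Teardown callback for nf_flow_table_iterate(): a NULL @data tears
 * down every flow; otherwise only flows bound to the given net_device
 * in its netns are torn down.
 */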
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");