// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
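
/* Map one conntrack tuple direction onto the flow tuple used as the
 * flowtable hash key: L3 addresses, L3/L4 protocol numbers and, for
 * TCP and UDP, the port pair.
 */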
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}
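
/* Allocate a flow entry for a conntrack entry, holding a reference on
 * the conntrack so it cannot be freed while the flow is offloaded.
 */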
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !refcount_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
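
/* IPv6 dsts carry a cookie that must be passed to dst_check() to
 * detect route changes; for other families a zero cookie is used.
 */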
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}
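
/* Cache the forwarding state for one direction: MTU, ingress interface
 * and encapsulation stack, plus the transmit path (precomputed Ethernet
 * header for direct xmit, or a referenced dst for neigh/xfrm xmit).
 */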
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
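
/* Conntrack state fixups applied when a flow leaves the offload path:
 * TCP window tracking is unreliable after packets bypassed conntrack,
 * so reset it and let the connection be picked up again.
 */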
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
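
/* Flows are hashed on the tuple fields up to the __hash marker; the
 * remaining fields (dst_cache, dir, ...) are per-direction state and
 * must not affect hashing or comparison.
 */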
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}
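
/* Insert both flow directions into the flowtable. If the reply
 * direction cannot be inserted, the original direction is removed
 * again so no half-inserted flow remains.
 */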
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
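
/* Datapath refresh: push the flow timeout forward and, when hardware
 * offload is enabled, requeue an offload request for this flow.
 */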
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (READ_ONCE(flow->timeout) != timeout)
		WRITE_ONCE(flow->timeout, timeout);

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}
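
/* Mark a flow as torn down; the garbage collector performs the actual
 * removal on its next run.
 */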
void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
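
/* Walk all flows in the table. Each flow is hashed twice (once per
 * direction), so only the ORIGINAL direction entry is handed to the
 * callback.
 */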
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	struct dst_entry *dst;

	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		dst = tuple->dst_cache;
		if (!dst_check(dst, tuple->dst_cookie))
			return true;
	}

	return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}
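
/* One garbage collection step per flow: tear down flows that expired,
 * whose conntrack entry is dying or whose cached route went stale, and
 * drive the hardware offload del/stats state machine.
 */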
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
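
/* Port NAT helpers: rewrite the L4 checksum to match the mangled port.
 * A zero UDP checksum means the checksum is disabled, so it is only
 * updated when present or when the skb is CHECKSUM_PARTIAL.
 */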
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}
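
/* Source NAT: in the original direction the source port is rewritten
 * to the reply tuple's destination port, and vice versa.
 */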
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
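
/* Initialize a flowtable: set up the rhashtable, kick off the periodic
 * garbage collector (every HZ jiffies) and register the table on the
 * global flowtables list used for device cleanup.
 */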
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
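
/* Release a flowtable: stop the garbage collector, tear down and free
 * all remaining flows (running the GC step again when hardware offload
 * is active, so that hardware removals can complete) and destroy the
 * rhashtable.
 */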
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");