// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
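
/* Kick the flow back to the slow path on TCP FIN or RST, so that
 * conntrack observes the connection teardown.
 */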
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}
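
/* Incrementally patch one IPv4 address in the TCP checksum; the
 * checksum is never recomputed from scratch on the fast path.
 */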
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}
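
/* A UDP checksum of zero means "no checksum" on IPv4 (RFC 768), so only
 * fix it up when one is in use, and map a computed zero to
 * CSUM_MANGLED_0 so it is not mistaken for "no checksum".
 */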
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}
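
/* Source NAT: in the original direction, rewrite the source address to
 * the reply tuple's destination address; in the reply direction, put the
 * original source back as destination. Both the IP header checksum and
 * the layer 4 pseudo-header checksum are patched incrementally.
 */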
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
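
/* Apply the NAT mangling recorded in the flow entry: ports first, then
 * addresses, for SNAT and DNAT as flagged on the flow.
 */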
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}
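
/* Record the encapsulation headers in front of the IP header in the
 * lookup tuple: a hardware-accelerated VLAN tag, if present, followed by
 * an 802.1Q or PPPoE session header on the wire.
 */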
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}
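
/* Fill the IPv4 lookup tuple from the packet. Return -1 to stay on the
 * slow path for fragments, packets with IP options, anything that is not
 * TCP or UDP, and packets whose TTL is about to expire.
 */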
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}
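
/* Peek at the PPP protocol field right behind the PPPoE session header
 * and map it to an Ethernet protocol; returns 0 for anything that is
 * neither IPv4 nor IPv6.
 */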
static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
{
	__be16 proto;

	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			     sizeof(struct pppoe_hdr)));
	switch (proto) {
	case htons(PPP_IP):
		return htons(ETH_P_IP);
	case htons(PPP_IPV6):
		return htons(ETH_P_IPV6);
	}

	return 0;
}

static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}
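
/* Strip the encapsulation recorded in the tuple so the packet can be
 * forwarded as plain IP: clear a hardware-offloaded VLAN tag, or pull
 * the 802.1Q/PPPoE header off the front of the skb.
 */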
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}
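
/* FLOW_OFFLOAD_XMIT_DIRECT: the output device and both MAC addresses
 * were resolved when the flow was offloaded, so prepend the Ethernet
 * header and hand the packet straight to the device queue.
 */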
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}
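
/* IPv4 fast path, run from the flowtable hook: build a tuple from the
 * packet, look up the flow, check MTU and TCP state, refresh the flow
 * timeout, apply NAT, decrement the TTL and transmit directly, bypassing
 * the classic forwarding path. Packets without a matching flow return
 * NF_ACCEPT and continue through the regular stack.
 */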
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
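
/* The IPv6 half below mirrors the IPv4 path: there is no IP header
 * checksum to patch, the 128-bit addresses are covered by the layer 4
 * pseudo-header checksum, and hop_limit takes the place of the TTL.
 */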
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}
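
/* The UDP checksum is mandatory on IPv6 (RFC 8200), but keep the same
 * zero-checksum guard as the IPv4 path for robustness.
 */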
static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}
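
/* IPv6 counterpart of nf_flow_offload_ip_hook(): same lookup, NAT and
 * transmit logic, with the hop limit decremented instead of the TTL.
 */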
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);