/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
		       IP_TNL_HASH_BITS);
}
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		else
			/* key expected, none present */
			return false;
	} else
		return !(flags & TUNNEL_KEY);
}
/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an input packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else
			cand = t;
	}

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		else if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
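/* A reader's summary of the matching priority above (informative only):
 *
 *   pass 1: exact (local, remote) pair, key and IFF_UP checked;
 *   pass 2: remote address only, wildcard source;
 *   pass 3: local address only, or local as a multicast destination;
 *   pass 4: wildcard addresses, exact key (skipped for TUNNEL_NO_KEY).
 *
 * A tunnel matching everything but the link is remembered as "cand" and
 * used only when no pass produces an exact link match; the collect_md
 * device and the fallback device are the last resorts.
 */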
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}

	return t;
}
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	err = -E2BIG;
	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			goto failed;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3))
			goto failed;
		strcpy(name, ops->kind);
		strcat(name, "%d");
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
				    iph->saddr, tunnel->parms.o_key,
				    RT_TOS(iph->tos), tunnel->parms.link,
				    tunnel->fwmark, 0);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = min(tdev->mtu, IP_MAX_MTU);
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < IPV4_MIN_MTU)
		mtu = IPV4_MIN_MTU;

	return mtu;
}
static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;
	int t_hlen;
	int mtu;
	int err;

	dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	mtu = ip_tunnel_bind_dev(dev);
	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	nt = netdev_priv(dev);
	t_hlen = nt->hlen + sizeof(struct iphdr);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
	ip_tunnel_add(itn, nt);
	return nt;

err_dev_set_mtu:
	unregister_netdevice(dev);
	return ERR_PTR(err);
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);
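/* Illustrative registration sketch (not from this file; the "foo" names
 * are hypothetical): an encapsulation provider such as FOU registers its
 * ops for one TUNNEL_ENCAP_* slot at module init and removes them at exit:
 *
 *	static const struct ip_tunnel_encap_ops foo_encap_ops = {
 *		.encap_hlen	= foo_encap_hlen,
 *		.build_header	= foo_build_header,
 *	};
 *
 *	err = ip_tunnel_encap_add_ops(&foo_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip_tunnel_encap_del_ops(&foo_encap_ops, TUNNEL_ENCAP_FOU);
 */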
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			   struct rtable *rt, __be16 df,
			   const struct iphdr *inner_iph,
			   int tunnel_hlen, __be32 dst, bool md)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size;
	int mtu;

	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
	pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel_hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	skb_dst_update_pmtu(skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
		__be32 daddr;

		daddr = md ? dst : tunnel->parms.iph.daddr;

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((daddr && !ipv4_is_multicast(daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}
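/* Worked example of the arithmetic above (illustrative numbers): with a
 * 1500-byte route MTU, no link-layer header (hard_header_len == 0) and a
 * plain IPIP tunnel (tunnel_hlen == 0), the DF branch yields
 * mtu = 1500 - 0 - 20 - 0 = 1480, so a 1481-byte inner IPv4 packet with
 * DF set is bounced with ICMP_FRAG_NEEDED advertising 1480.
 */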
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       u8 proto, int tunnel_hlen)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;
	bool use_cache;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
			    0, skb->mark, skb_get_hash(skb));
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;

	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
			    key->u.ipv4.dst, true)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	if (!df && skb->protocol == htons(ETH_P_IP))
		df = inner_iph->frag_off & htons(IP_DF);

	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (headroom > dev->needed_headroom)
		dev->needed_headroom = headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}
	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;
tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
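/* Illustrative only (a sketch, assuming the __ip_tun_set_dst() helper from
 * net/dst_metadata.h): a collect_md transmitter attaches IP_TUNNEL_INFO_TX
 * metadata before the skb reaches ip_md_tunnel_xmit(), roughly:
 *
 *	struct metadata_dst *md_dst;
 *
 *	md_dst = __ip_tun_set_dst(saddr, daddr, tos, ttl, 0,
 *				  TUNNEL_KEY, tun_id, 0);
 *	if (!md_dst)
 *		return -ENOMEM;
 *	skb_dst_drop(skb);
 *	skb_dst_set(skb, &md_dst->dst);
 *
 * ip_md_tunnel_xmit() then recovers the key via skb_tunnel_info(skb).
 */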
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info = NULL;
	const struct iphdr *inner_iph;
	unsigned int max_headroom;	/* The extra header space needed */
	struct rtable *rt = NULL;	/* Route to the other host */
	bool use_cache = false;
	struct flowi4 fl4;
	bool md = false;
	bool connected;
	u8 tos, ttl;
	__be32 dst;
	__be16 df;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		tun_info = skb_tunnel_info(skb);
		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
		    ip_tunnel_info_af(tun_info) == AF_INET &&
		    tun_info->key.u.ipv4.dst) {
			dst = tun_info->key.u.ipv4.dst;
			md = true;
			connected = true;
		}
		else if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		if (!md)
			connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
			    tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
			    tunnel->fwmark, skb_get_hash(skb));

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	if (connected && md) {
		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
		if (use_cache)
			rt = dst_cache_get_ip4(&tun_info->dst_cache,
					       &fl4.saddr);
	} else {
		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
						   &fl4.saddr) : NULL;
	}

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
		else if (!md && connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
			    0, 0, false)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu,
			     __u32 fwmark)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link || t->fwmark != fwmark) {
		int mtu;

		t->parms.link = p->link;
		t->fwmark = fwmark;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true, 0);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
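/* Illustrative userspace counterpart (not kernel code; names and addresses
 * are examples): the classic "ip tunnel" tool reaches this handler via an
 * ioctl on an AF_INET socket, passing struct ip_tunnel_parm through ifreq:
 *
 *	struct ip_tunnel_parm p = { 0 };
 *	struct ifreq ifr;
 *
 *	strncpy(p.name, "mytun0", IFNAMSIZ - 1);
 *	p.iph.version = 4;
 *	p.iph.ihl = 5;
 *	p.iph.protocol = IPPROTO_IPIP;
 *	p.iph.saddr = inet_addr("192.0.2.1");
 *	p.iph.daddr = inet_addr("192.0.2.2");
 *	strncpy(ifr.ifr_name, "tunl0", IFNAMSIZ - 1);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0)
 *		perror("SIOCADDTUNNEL");
 */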
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;

	if (new_mtu < ETH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
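/* Example of the clamp above (illustrative): a GRE tunnel carrying both a
 * key and a checksum has tunnel->hlen == 12, so t_hlen == 32 and
 * max_mtu = 65535 - 0 - 32 = 65503 on a device without a link-layer
 * header; a larger request fails with -EINVAL when strict, and is
 * silently reduced to 65503 otherwise.
 */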
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
}
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	itn->rtnl_link_ops = ops;
	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops || !net_has_fallback_tunnels(net)) {
		struct ip_tunnel_net *it_init_net;

		it_init_net = net_generic(&init_net, ip_tnl_net_id);
		itn->type = it_init_net->type;
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
		itn->type = itn->fb_tunnel_dev->type;
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
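/* Illustrative caller (modelled on net/ipv4/ipip.c): a tunnel driver wires
 * this into its pernet_operations to create the per-netns fallback device:
 *
 *	static int __net_init ipip_init_net(struct net *net)
 *	{
 *		return ip_tunnel_init_net(net, ipip_net_id,
 *					  &ipip_link_ops, "tunl0");
 *	}
 *
 * and tears everything down from its .exit_batch hook with
 * ip_tunnel_delete_nets().
 */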
static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
			      struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
			   struct rtnl_link_ops *ops)
{
	struct ip_tunnel_net *itn;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		itn = net_generic(net, id);
		ip_tunnel_destroy(net, itn, &list, ops);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	nt->fwmark = fwmark;
	err = register_netdevice(dev);
	if (err)
		goto err_register_netdevice;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (tb[IFLA_MTU]) {
		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;

		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
			    (unsigned int)(max - sizeof(struct iphdr)));
	}

	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	ip_tunnel_add(itn, nt);
	return 0;

err_dev_set_mtu:
	unregister_netdevice(dev);
err_register_netdevice:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->needs_free_netdev = true;
	dev->priv_destructor = ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version		= 4;
	iph->ihl		= 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
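/* Illustrative wiring (modelled on net/ipv4/ipip.c): drivers typically plug
 * these helpers straight into their net_device_ops, wrapping only the
 * pieces that need protocol-specific validation:
 *
 *	static const struct net_device_ops ipip_netdev_ops = {
 *		.ndo_init	= ip_tunnel_init,
 *		.ndo_uninit	= ip_tunnel_uninit,
 *		.ndo_start_xmit	= ipip_tunnel_xmit,
 *		.ndo_do_ioctl	= ipip_tunnel_ioctl,
 *		.ndo_change_mtu	= ip_tunnel_change_mtu,
 *		.ndo_get_stats64 = ip_tunnel_get_stats64,
 *		.ndo_get_iflink	= ip_tunnel_get_iflink,
 *	};
 *
 * where ipip_tunnel_xmit() and ipip_tunnel_ioctl() are the driver's thin
 * protocol-specific wrappers.
 */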
void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(itn, netdev_priv(dev));

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
/* Do least required initialization, rest of init is done in tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");