/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

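/* Tunnels are hashed into a per-netns table by i_key and remote
 * address, so receive-side lookup stays cheap as tunnels are added.
 */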
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
	return hash_32((__force u32)key ^ (__force u32)remote,
			 IP_TNL_HASH_BITS);
}

static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
				__be16 flags, __be32 key)
{
	if (p->i_flags & TUNNEL_KEY) {
		if (flags & TUNNEL_KEY)
			return key == p->i_key;
		/* key expected, none present */
		return false;
	}

	return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an incoming packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key)
{
	unsigned int hash;
	struct ip_tunnel *t, *cand = NULL;
	struct hlist_head *head;

	hash = ip_tunnel_hash(key, remote);
	head = &itn->tunnels[hash];

	/* Pass 1: fully specified tunnels (saddr, daddr and key all match) */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		cand = t;
	}

	/* Pass 2: wildcard source */
	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (remote != t->parms.iph.daddr ||
		    t->parms.iph.saddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		if (!cand)
			cand = t;
	}

	/* Pass 3: wildcard destination (local or multicast address) */
	hash = ip_tunnel_hash(key, 0);
	head = &itn->tunnels[hash];

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
			continue;

		if (!(t->dev->flags & IFF_UP))
			continue;

		if (!ip_tunnel_key_match(&t->parms, flags, key))
			continue;

		if (t->parms.link == link)
			return t;
		if (!cand)
			cand = t;
	}

	/* Pass 4: key-only match */
	if (flags & TUNNEL_NO_KEY)
		goto skip_key_lookup;

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (t->parms.i_key != key ||
		    t->parms.iph.saddr != 0 ||
		    t->parms.iph.daddr != 0 ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->parms.link == link)
			return t;
		if (!cand)
			cand = t;
	}

skip_key_lookup:
	if (cand)
		return cand;

	t = rcu_dereference(itn->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(itn->fb_tunnel_dev);

	return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);

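/* Select the hash bucket for a tunnel's parameters. Multicast and unset
 * destinations hash as remote == 0, and VTI tunnels without TUNNEL_KEY
 * ignore i_key, matching the wildcard rules used by the lookup above.
 */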
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
				    struct ip_tunnel_parm *parms)
{
	unsigned int h;
	__be32 remote;
	__be32 i_key = parms->i_key;

	if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
		remote = parms->iph.daddr;
	else
		remote = 0;

	if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
		i_key = 0;

	h = ip_tunnel_hash(i_key, remote);
	return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}

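/* Exact-parameter lookup used on the control path (ioctl/netlink), as
 * opposed to the wildcard-tolerant ip_tunnel_lookup() above.
 */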
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
					struct ip_tunnel_parm *parms,
					int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	__be16 flags = parms->i_flags;
	int link = parms->link;
	struct ip_tunnel *t = NULL;
	struct hlist_head *head = ip_bucket(itn, parms);

	hlist_for_each_entry_rcu(t, head, hash_node) {
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    link == t->parms.link &&
		    type == t->dev->type &&
		    ip_tunnel_key_match(&t->parms, flags, key))
			break;
	}
	return t;
}

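/* Allocate a tunnel netdevice for the given parameters and register it;
 * returns the device or an ERR_PTR, freeing the half-created device on
 * failure.
 */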
static struct net_device *__ip_tunnel_create(struct net *net,
					     const struct rtnl_link_ops *ops,
					     struct ip_tunnel_parm *parms)
{
	int err;
	struct ip_tunnel *tunnel;
	struct net_device *dev;
	char name[IFNAMSIZ];

	err = -E2BIG;
	if (parms->name[0]) {
		if (!dev_valid_name(parms->name))
			goto failed;
		strlcpy(name, parms->name, IFNAMSIZ);
	} else {
		if (strlen(ops->kind) > (IFNAMSIZ - 3))
			goto failed;
		strcpy(name, ops->kind);
		strcat(name, "%d");
	}

	ASSERT_RTNL();
	dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
	if (!dev) {
		err = -ENOMEM;
		goto failed;
	}
	dev_net_set(dev, net);

	dev->rtnl_link_ops = ops;

	tunnel = netdev_priv(dev);
	tunnel->parms = *parms;
	tunnel->net = net;

	err = register_netdevice(dev);
	if (err)
		goto failed_free;

	return dev;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

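/* Probe a route to the tunnel destination once so the device can inherit
 * a sensible mtu and needed_headroom from the underlying device; returns
 * the MTU the tunnel device should use.
 */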
static int ip_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);

	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */
	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
				    iph->saddr, tunnel->parms.o_key,
				    RT_TOS(iph->tos), tunnel->parms.link,
				    tunnel->fwmark, 0);
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}
		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;

		dst_cache_reset(&tunnel->dst_cache);
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = min(tdev->mtu, IP_MAX_MTU);
	}

	dev->needed_headroom = t_hlen + hlen;
	mtu -= (dev->hard_header_len + t_hlen);

	if (mtu < IPV4_MIN_MTU)
		mtu = IPV4_MIN_MTU;

	return mtu;
}

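/* Create, size and hash a new tunnel in response to a control request. */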
static struct ip_tunnel *ip_tunnel_create(struct net *net,
					  struct ip_tunnel_net *itn,
					  struct ip_tunnel_parm *parms)
{
	struct ip_tunnel *nt;
	struct net_device *dev;
	int t_hlen;
	int mtu;
	int err;

	dev = __ip_tunnel_create(net, itn->rtnl_link_ops, parms);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	mtu = ip_tunnel_bind_dev(dev);
	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	nt = netdev_priv(dev);
	t_hlen = nt->hlen + sizeof(struct iphdr);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
	ip_tunnel_add(itn, nt);
	return nt;

err_dev_set_mtu:
	unregister_netdevice(dev);
	return ERR_PTR(err);
}

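/* Common receive path for decapsulated packets: validate csum/seq flags
 * against the tunnel's expectations, handle ECN, update stats and hand
 * the skb to GRO. Consumes the skb (and tun_dst) on error.
 */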
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
{
	struct pcpu_sw_netstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	/* Packet and tunnel must agree on TUNNEL_CSUM (logical XOR). */
	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);

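/* Registry of external encapsulations (e.g. FOU/GUE); slots are claimed
 * and released atomically with cmpxchg().
 */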
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	return !cmpxchg((const struct ip_tunnel_encap_ops **)
			&iptun_encaps[num],
			NULL, ops) ? 0 : -1;
}
EXPORT_SYMBOL(ip_tunnel_encap_add_ops);

int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(ip_tunnel_encap_del_ops);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap)
{
	int hlen;

	memset(&t->encap, 0, sizeof(t->encap));

	hlen = ip_encap_hlen(ipencap);
	if (hlen < 0)
		return hlen;

	t->encap.type = ipencap->type;
	t->encap.sport = ipencap->sport;
	t->encap.dport = ipencap->dport;
	t->encap.flags = ipencap->flags;

	t->encap_hlen = hlen;
	t->hlen = t->encap_hlen + t->tun_hlen;

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);

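/* Enforce path MTU across the tunnel: update the inner dst's pmtu and
 * send ICMP "frag needed" / ICMPv6 "packet too big" back when a non-GSO
 * packet no longer fits.
 */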
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
			    struct rtable *rt, __be16 df,
			    const struct iphdr *inner_iph,
			    int tunnel_hlen, __be32 dst, bool md)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int pkt_size;
	int mtu;

	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
	pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;

	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len
					- sizeof(struct iphdr) - tunnel_hlen;
	else
		mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_valid_dst(skb))
		skb_dst_update_pmtu(skb, mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		if (!skb_is_gso(skb) &&
		    (inner_iph->frag_off & htons(IP_DF)) &&
		    mtu < pkt_size) {
			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			return -E2BIG;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6;
		__be32 daddr;

		rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
					   NULL;
		daddr = md ? dst : tunnel->parms.iph.daddr;

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
			   mtu >= IPV6_MIN_MTU) {
			if ((daddr && !ipv4_is_multicast(daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
					mtu < pkt_size) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			return -E2BIG;
		}
	}
#endif
	return 0;
}

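/* Transmit for collect_md tunnels (externally controlled, e.g. by OVS):
 * all addressing comes from the skb's tunnel metadata rather than from
 * the device's own parms.
 */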
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       u8 proto, int tunnel_hlen)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	u32 headroom = sizeof(struct iphdr);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	const struct iphdr *inner_iph;
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be16 df = 0;
	u8 tos, ttl;
	bool use_cache;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto tx_error;
	key = &tun_info->key;
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	tos = key->tos;
	if (tos == 1) {
		if (skb->protocol == htons(ETH_P_IP))
			tos = inner_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
	}
	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
			    0, skb->mark, skb_get_hash(skb));
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
		goto tx_error;

	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);
		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
	}
	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
		df = htons(IP_DF);
	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
			    key->u.ipv4.dst, true)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = key->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	if (!df && skb->protocol == htons(ETH_P_IP))
		df = inner_iph->frag_off & htons(IP_DF);

	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
	if (headroom > dev->needed_headroom)
		dev->needed_headroom = headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		goto tx_dropped;
	}
	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;
tx_error:
	dev->stats.tx_errors++;
	goto kfree;
tx_dropped:
	dev->stats.tx_dropped++;
kfree:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);

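/* Classical transmit path: addressing comes from the device's own
 * parameters, with NBMA (daddr == 0) destinations resolved from the
 * inner headers or attached metadata.
 */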
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, u8 protocol)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info = NULL;
	const struct iphdr *inner_iph;
	unsigned int max_headroom;	/* The extra header space needed */
	struct rtable *rt = NULL;	/* Route to the other host */
	bool use_cache = false;
	struct flowi4 fl4;
	bool md = false;
	bool connected;
	u8 tos, ttl;
	__be32 dst;
	__be16 df;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	connected = (tunnel->parms.iph.daddr != 0);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	dst = tnl_params->daddr;
	if (dst == 0) {
		/* NBMA tunnel */

		if (!skb_dst(skb)) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		tun_info = skb_tunnel_info(skb);
		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
		    ip_tunnel_info_af(tun_info) == AF_INET &&
		    tun_info->key.u.ipv4.dst) {
			dst = tun_info->key.u.ipv4.dst;
			md = true;
			connected = true;
		}
		else if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			dst = rt_nexthop(rt, inner_iph->daddr);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			const struct in6_addr *addr6;
			struct neighbour *neigh;
			bool do_tx_error_icmp;
			int addr_type;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				do_tx_error_icmp = true;
			else {
				do_tx_error_icmp = false;
				dst = addr6->s6_addr32[3];
			}
			neigh_release(neigh);
			if (do_tx_error_icmp)
				goto tx_error_icmp;
		}
#endif
		else
			goto tx_error;

		if (!md)
			connected = false;
	}

	tos = tnl_params->tos;
	if (tos & 0x1) {
		tos &= ~0x1;
		if (skb->protocol == htons(ETH_P_IP)) {
			tos = inner_iph->tos;
			connected = false;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
			connected = false;
		}
	}

	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
			    tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
			    tunnel->fwmark, skb_get_hash(skb));

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
		goto tx_error;

	if (connected && md) {
		use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
		if (use_cache)
			rt = dst_cache_get_ip4(&tun_info->dst_cache,
					       &fl4.saddr);
	} else {
		rt = connected ? dst_cache_get_ip4(&tunnel->dst_cache,
						&fl4.saddr) : NULL;
	}

	if (!rt) {
		rt = ip_route_output_key(tunnel->net, &fl4);

		if (IS_ERR(rt)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl4.saddr);
		else if (!md && connected)
			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
					  fl4.saddr);
	}

	if (rt->dst.dev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
			    0, 0, false)) {
		ip_rt_put(rt);
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
	ttl = tnl_params->ttl;
	if (ttl == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
		else
			ttl = ip4_dst_hoplimit(&rt->dst);
	}

	df = tnl_params->frag_off;
	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
		df |= (inner_iph->frag_off&htons(IP_DF));

	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	if (skb_cow_head(skb, dev->needed_headroom)) {
		ip_rt_put(rt);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return;
	}

	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
		      df, !net_eq(tunnel->net, dev_net(dev)));
	return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
	dst_link_failure(skb);
#endif
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);

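/* Apply changed parameters to an existing tunnel, rehashing it since the
 * lookup keys may have changed.
 */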
static void ip_tunnel_update(struct ip_tunnel_net *itn,
			     struct ip_tunnel *t,
			     struct net_device *dev,
			     struct ip_tunnel_parm *p,
			     bool set_mtu,
			     __u32 fwmark)
{
	ip_tunnel_del(itn, t);
	t->parms.iph.saddr = p->iph.saddr;
	t->parms.iph.daddr = p->iph.daddr;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->iph.saddr, 4);
		memcpy(dev->broadcast, &p->iph.daddr, 4);
	}
	ip_tunnel_add(itn, t);

	t->parms.iph.ttl = p->iph.ttl;
	t->parms.iph.tos = p->iph.tos;
	t->parms.iph.frag_off = p->iph.frag_off;

	if (t->parms.link != p->link || t->fwmark != fwmark) {
		int mtu;

		t->parms.link = p->link;
		t->fwmark = fwmark;
		mtu = ip_tunnel_bind_dev(dev);
		if (set_mtu)
			dev->mtu = mtu;
	}
	dst_cache_reset(&t->dst_cache);
	netdev_state_change(dev);
}

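/* SIOC{GET,ADD,CHG,DEL}TUNNEL handler shared by ipip, GRE, VTI etc. */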
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
	int err = 0;
	struct ip_tunnel *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == itn->fb_tunnel_dev) {
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				t = netdev_priv(dev);
		}
		memcpy(p, &t->parms, sizeof(*p));
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;
		if (p->iph.ttl)
			p->iph.frag_off |= htons(IP_DF);
		if (!(p->i_flags & VTI_ISVTI)) {
			if (!(p->i_flags & TUNNEL_KEY))
				p->i_key = 0;
			if (!(p->o_flags & TUNNEL_KEY))
				p->o_key = 0;
		}

		t = ip_tunnel_find(itn, p, itn->type);

		if (cmd == SIOCADDTUNNEL) {
			if (!t) {
				t = ip_tunnel_create(net, itn, p);
				err = PTR_ERR_OR_ZERO(t);
				break;
			}

			err = -EEXIST;
			break;
		}
		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				if (ipv4_is_multicast(p->iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p->iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}

				t = netdev_priv(dev);
			}
		}

		if (t) {
			err = 0;
			ip_tunnel_update(itn, t, dev, p, true, 0);
		} else {
			err = -ENOENT;
		}
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == itn->fb_tunnel_dev) {
			err = -ENOENT;
			t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
			if (!t)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(itn->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

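/* Clamp MTU changes to what the tunnel headers leave room for; in strict
 * mode an oversized request is rejected instead of clamped.
 */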
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;

	if (new_mtu < ETH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;

		new_mtu = max_mtu;
	}

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	return __ip_tunnel_change_mtu(dev, new_mtu, true);
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	dst_cache_destroy(&tunnel->dst_cache);
	free_percpu(dev->tstats);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_net *itn;

	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

	if (itn->fb_tunnel_dev != dev) {
		ip_tunnel_del(itn, netdev_priv(dev));
		unregister_netdevice_queue(dev, head);
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

struct net *ip_tunnel_get_link_net(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip_tunnel_get_link_net);

int ip_tunnel_get_iflink(const struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	return tunnel->parms.link;
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);

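/* Per-netns setup: initialize the hash table and, where the netns allows
 * fallback tunnels, create the traditional fallback device (e.g. tunl0,
 * gre0).
 */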
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	itn->rtnl_link_ops = ops;
	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops || !net_has_fallback_tunnels(net)) {
		struct ip_tunnel_net *it_init_net;

		it_init_net = net_generic(&init_net, ip_tnl_net_id);
		itn->type = it_init_net->type;
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
		itn->type = itn->fb_tunnel_dev->type;
	}
	rtnl_unlock();

	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
			      struct list_head *head,
			      struct rtnl_link_ops *ops)
{
	struct net_device *dev, *aux;
	int h;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == ops)
			unregister_netdevice_queue(dev, head);

	for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
		struct ip_tunnel *t;
		struct hlist_node *n;
		struct hlist_head *thead = &itn->tunnels[h];

		hlist_for_each_entry_safe(t, n, thead, hash_node)
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, head);
	}
}

void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
			   struct rtnl_link_ops *ops)
{
	struct ip_tunnel_net *itn;
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		itn = net_generic(net, id);
		ip_tunnel_destroy(net, itn, &list, ops);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);

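/* rtnl_link_ops->newlink helper shared by the IP tunnel drivers. */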
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ip_tunnel_net *itn;
	int mtu;
	int err;

	nt = netdev_priv(dev);
	itn = net_generic(net, nt->ip_tnl_net_id);

	if (nt->collect_md) {
		if (rtnl_dereference(itn->collect_md_tun))
			return -EEXIST;
	} else {
		if (ip_tunnel_find(itn, p, dev->type))
			return -EEXIST;
	}

	nt->net = net;
	nt->parms = *p;
	nt->fwmark = fwmark;
	err = register_netdevice(dev);
	if (err)
		goto err_register_netdevice;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	mtu = ip_tunnel_bind_dev(dev);
	if (tb[IFLA_MTU]) {
		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;

		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
			    (unsigned int)(max - sizeof(struct iphdr)));
	}

	err = dev_set_mtu(dev, mtu);
	if (err)
		goto err_dev_set_mtu;

	ip_tunnel_add(itn, nt);
	return 0;

err_dev_set_mtu:
	unregister_netdevice(dev);
err_register_netdevice:
	return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

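/* rtnl_link_ops->changelink helper; rejects changes that would clash with
 * an existing tunnel or alter the device's link-layer flavour.
 */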
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p, __u32 fwmark)
{
	struct ip_tunnel *t;
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

	if (dev == itn->fb_tunnel_dev)
		return -EINVAL;

	t = ip_tunnel_find(itn, p, dev->type);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = tunnel;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p->iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p->iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}
	}

	ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark);
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);

int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->needs_free_netdev = true;
	dev->priv_destructor = ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		dst_cache_destroy(&tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version = 4;
	iph->ihl = 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net *net = tunnel->net;
	struct ip_tunnel_net *itn;

	itn = net_generic(net, tunnel->ip_tnl_net_id);
	/* fb_tunnel_dev will be unregistered in the net-exit call. */
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(itn, netdev_priv(dev));

	dst_cache_reset(&tunnel->dst_cache);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do the least required initialization; the rest is done in the
 * tunnel_init call.
 */
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");