/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local TTL),
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since CPU migration is forbidden once we enter the first
   ndo_xmit(), a plain per-CPU counter is safe. We force an exit if this
   counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the upper header.
   It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with RFC 1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my
     neighbourhood) return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. To be short, TTL is no
   solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the PMTU are pruned) and the tunnel MTU
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it is your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)
*/
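
/* Illustrative sketch only (not this driver's code; all names here are
 * invented): the xmit_recursion idea from the comment above, in
 * miniature. In kernels of this vintage the real counter lives in the
 * core transmit path (net/core/dev.c), not here.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, demo_xmit_recursion);
#define DEMO_RECURSION_LIMIT	4

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ndo_start_xmit() runs with preemption disabled, so a plain
	 * per-cpu counter needs no further locking.
	 */
	if (__this_cpu_read(demo_xmit_recursion) > DEMO_RECURSION_LIMIT) {
		kfree_skb(skb);		/* break the local dead loop */
		return NETDEV_TX_OK;
	}
	__this_cpu_inc(demo_xmit_recursion);
	/* ... encapsulate and hand the skb to the lower device ... */
	__this_cpu_dec(demo_xmit_recursion);
	return NETDEV_TX_OK;
}
#endif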
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				__be32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder: RFC 1812 was written by a Cisco employee;
	   why the hell do these idiots break the standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);
	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
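		/* RFC 4884 4.1: the original-datagram length travels in
		 * what this icmphdr declares as a reserved byte, and is
		 * counted in 32-bit words; hence the multiplication by 4.
		 */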
		data_len = icmp_hdr(skb)->un.reserved[1] * 4;
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: RFC 1812 was written by a Cisco employee;
	 * why the hell do these idiots break the standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check the base header length. */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
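	/* Note (assumption based on get_session_id() in net/erspan.h):
	 * the helper keeps only the low 10 bits of the session ID, so
	 * this key distinguishes at most 1024 ERSPAN sessions.
	 */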
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect-metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);

	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_rt;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header. */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);
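	/* Those 8 bytes are the 4-byte GRE base header plus the 4-byte
	 * sequence number field: ERSPAN always sets the S bit and
	 * carries no key or checksum in the GRE header itself.
	 */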
	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;
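	/* The two checks above catch mirrored packets that were already
	 * truncated upstream: if the inner IPv4/IPv6 header claims more
	 * bytes than the skb actually holds, mark the ERSPAN frame as
	 * truncated as well.
	 */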
	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);

	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
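	/* The session ID already went into the ERSPAN header above, so
	 * TUNNEL_KEY is cleared here to keep __gre_xmit() from emitting
	 * the key a second time in the GRE header.
	 */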
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);
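	/* 68 is the minimum MTU an IPv4 host must accept (RFC 791:
	 * a 60-byte maximal header plus an 8-byte fragment), so the
	 * MTU is never shrunk below that value.
	 */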
	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
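	/* header_ops->create convention (cf. eth_header()): return the
	 * pushed header length when the header is complete, or minus
	 * that length when the destination is still unresolved.
	 */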
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor can we
		 * support two levels of outer headers requiring an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have the GRE sequence and key flags. */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}
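	/* The collect-metadata branch above switches the device to
	 * ARPHRD_NONE: in external mode there is no fixed peer, and the
	 * outer header is built per packet from tunnel metadata rather
	 * than from a link-layer address.
	 */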
	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops		= &gre_tap_netdev_ops;
	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;
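
	/* The labels below unwind in reverse registration order: each
	 * one undoes everything that succeeded before the failing step,
	 * then falls through to the next.
	 */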
erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");