2 * Linux NET3: GRE over IP protocol decoder.
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/mroute.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
39 #include <net/protocol.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
51 #if IS_ENABLED(CONFIG_IPV6)
53 #include <net/ip6_fib.h>
54 #include <net/ip6_route.h>
61 1. The most important issue is detecting local dead loops.
62 They would cause complete host lockup in transmit, which
63 would be "resolved" by stack overflow or, if queueing is enabled,
64 by infinite looping in net_bh.
66 We cannot track such dead loops during route installation;
67 it is an infeasible task. The most general solution would be
68 to keep an skb->encapsulation counter (a sort of local ttl),
69 and silently drop the packet when it expires. It is a good
70 solution, but it requires maintaining a new field in ALL
71 skbs, even if no tunneling is used.
73 Current solution: xmit_recursion breaks dead loops. This is a percpu
74 counter, since cpu migration is forbidden once we enter the first
75 ndo_xmit(). We force an exit if this counter reaches RECURSION_LIMIT
76 (an illustrative sketch of this idea follows this comment).
77 2. Networking dead loops would not kill routers, but would really
78 kill the network. The IP hop limit plays the role of "t->recursion" here,
79 if we copy it from the packet being encapsulated to the upper header.
80 It is a very good solution, but it introduces two problems:
82 - Routing protocols using packets with ttl=1 (OSPF, RIP2)
83 do not work over tunnels.
84 - traceroute does not work. I planned to relay ICMP from the tunnel,
85 so that this problem would be solved and traceroute output
86 would be even more informative. This idea turned out to be wrong:
87 only Linux complies with rfc1812 now (yes, guys, Linux is the only
88 true router now :-)); all other routers (at least in my neighbourhood)
89 return only 8 bytes of payload. It is the end.
91 Hence, if we want OSPF to work or traceroute to say something reasonable,
92 we should search for another solution.
94 One of them is to parse the packet, trying to detect an inner encapsulation
95 made by our node. It is difficult or even impossible, especially
96 taking fragmentation into account. In short, ttl is not a solution at all.
98 Current solution: The solution turned out to be UNEXPECTEDLY SIMPLE.
99 We force the DF flag on tunnels with a preconfigured hop limit,
100 that is ALL. :-) Well, it does not remove the problem completely,
101 but exponential growth of network traffic is changed to linear growth
102 (branches that exceed the pmtu are pruned) and the tunnel mtu
103 rapidly degrades to a value <68, where looping stops.
104 Yes, it is not good if there exists a router in the loop
105 which does not force DF, even when the packets being encapsulated have DF set.
106 But it is not our problem! Nobody could accuse us; we did
107 all that we could. Even if it was your gated that injected the
108 fatal route into the network, even if it was you who configured the
109 fatal static route: you are innocent. :-)
113 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
114 practically identical code. It would be good to glue them
115 together, but it is not very evident how to make them modular.
116 sit is an integral part of IPv6, while ipip and gre are naturally modular.
117 We could extract the common parts (hash table, ioctl, etc.)
118 into a separate module (ip_tunnel.c).
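/*
 * Illustrative sketch only (not part of this driver): roughly how the
 * per-cpu xmit_recursion counter mentioned in point 1 above can break
 * local dead loops.  The real counter lives in the core transmit path
 * (net/core/dev.c), not here; the names below are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, gre_xmit_recursion);
#define GRE_RECURSION_LIMIT	10

static bool gre_xmit_recursion_enter(void)
{
	/* BHs are disabled on the xmit path, so we cannot migrate CPUs
	 * between this increment and the matching decrement below. */
	if (__this_cpu_read(gre_xmit_recursion) >= GRE_RECURSION_LIMIT)
		return false;	/* loop detected, caller should drop the skb */
	__this_cpu_inc(gre_xmit_recursion);
	return true;
}

static void gre_xmit_recursion_exit(void)
{
	__this_cpu_dec(gre_xmit_recursion);
}
#endif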
123 static bool log_ecn_error = true;
124 module_param(log_ecn_error, bool, 0644);
125 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
127 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
128 static int ipgre_tunnel_init(struct net_device *dev);
129 static void ipgre_tunnel_setup(struct net_device *dev);
130 static int ipgre_tunnel_bind_dev(struct net_device *dev);
132 /* Fallback tunnel: no source, no destination, no key, no options */
136 static int ipgre_net_id __read_mostly;
138 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
140 struct net_device *fb_tunnel_dev;
143 /* Tunnel hash table */
153 We require an exact key match, i.e. if a key is present in the packet
154 it will match only a tunnel with the same key; if it is not present,
155 it will match only a keyless tunnel.
157 All keyless packets, if not matched by configured keyless tunnels,
158 will match the fallback tunnel.
161 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
163 #define tunnels_r_l tunnels[3]
164 #define tunnels_r tunnels[2]
165 #define tunnels_l tunnels[1]
166 #define tunnels_wc tunnels[0]
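/*
 * Illustrative sketch only: HASH() above folds a 32-bit value (an IPv4
 * address or a GRE key) down to a 4-bit bucket index (only the low byte
 * actually contributes), so each of the four sub-tables defined above has
 * HASH_SIZE (16 in this driver) buckets.  Keyed remote+local lookups XOR
 * the folds of the remote address and the key.  A plain standalone
 * equivalent, with a hypothetical name, would be:
 */
#if 0
static unsigned int gre_hash_example(u32 value)
{
	/* XOR the value with itself shifted right by 4, keep the low nibble */
	return (value ^ (value >> 4)) & 0xF;
}
#endif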
168 static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
169 struct rtnl_link_stats64 *tot)
173 for_each_possible_cpu(i) {
174 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
175 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
179 start = u64_stats_fetch_begin_bh(&tstats->syncp);
180 rx_packets = tstats->rx_packets;
181 tx_packets = tstats->tx_packets;
182 rx_bytes = tstats->rx_bytes;
183 tx_bytes = tstats->tx_bytes;
184 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
186 tot->rx_packets += rx_packets;
187 tot->tx_packets += tx_packets;
188 tot->rx_bytes += rx_bytes;
189 tot->tx_bytes += tx_bytes;
192 tot->multicast = dev->stats.multicast;
193 tot->rx_crc_errors = dev->stats.rx_crc_errors;
194 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
195 tot->rx_length_errors = dev->stats.rx_length_errors;
196 tot->rx_frame_errors = dev->stats.rx_frame_errors;
197 tot->rx_errors = dev->stats.rx_errors;
199 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
200 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
201 tot->tx_dropped = dev->stats.tx_dropped;
202 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
203 tot->tx_errors = dev->stats.tx_errors;
208 /* Does the key in the tunnel parameters match the packet? */
209 static bool ipgre_key_match(const struct ip_tunnel_parm *p,
210 __be16 flags, __be32 key)
212 if (p->i_flags & GRE_KEY) {
214 return key == p->i_key;
216 return false; /* key expected, none present */
218 return !(flags & GRE_KEY);
221 /* Given src, dst and key, find the appropriate tunnel for input. */
223 static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
224 __be32 remote, __be32 local,
225 __be16 flags, __be32 key,
228 struct net *net = dev_net(dev);
229 int link = dev->ifindex;
230 unsigned int h0 = HASH(remote);
231 unsigned int h1 = HASH(key);
232 struct ip_tunnel *t, *cand = NULL;
233 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
234 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
235 ARPHRD_ETHER : ARPHRD_IPGRE;
236 int score, cand_score = 4;
238 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
239 if (local != t->parms.iph.saddr ||
240 remote != t->parms.iph.daddr ||
241 !(t->dev->flags & IFF_UP))
244 if (!ipgre_key_match(&t->parms, flags, key))
247 if (t->dev->type != ARPHRD_IPGRE &&
248 t->dev->type != dev_type)
252 if (t->parms.link != link)
254 if (t->dev->type != dev_type)
259 if (score < cand_score) {
265 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
266 if (remote != t->parms.iph.daddr ||
267 !(t->dev->flags & IFF_UP))
270 if (!ipgre_key_match(&t->parms, flags, key))
273 if (t->dev->type != ARPHRD_IPGRE &&
274 t->dev->type != dev_type)
278 if (t->parms.link != link)
280 if (t->dev->type != dev_type)
285 if (score < cand_score) {
291 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
292 if ((local != t->parms.iph.saddr &&
293 (local != t->parms.iph.daddr ||
294 !ipv4_is_multicast(local))) ||
295 !(t->dev->flags & IFF_UP))
298 if (!ipgre_key_match(&t->parms, flags, key))
301 if (t->dev->type != ARPHRD_IPGRE &&
302 t->dev->type != dev_type)
306 if (t->parms.link != link)
308 if (t->dev->type != dev_type)
313 if (score < cand_score) {
319 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
320 if (t->parms.i_key != key ||
321 !(t->dev->flags & IFF_UP))
324 if (t->dev->type != ARPHRD_IPGRE &&
325 t->dev->type != dev_type)
329 if (t->parms.link != link)
331 if (t->dev->type != dev_type)
336 if (score < cand_score) {
345 dev = ign->fb_tunnel_dev;
346 if (dev->flags & IFF_UP)
347 return netdev_priv(dev);
352 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
353 struct ip_tunnel_parm *parms)
355 __be32 remote = parms->iph.daddr;
356 __be32 local = parms->iph.saddr;
357 __be32 key = parms->i_key;
358 unsigned int h = HASH(key);
363 if (remote && !ipv4_is_multicast(remote)) {
368 return &ign->tunnels[prio][h];
371 static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
374 return __ipgre_bucket(ign, &t->parms);
377 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
379 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
381 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
382 rcu_assign_pointer(*tp, t);
385 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
387 struct ip_tunnel __rcu **tp;
388 struct ip_tunnel *iter;
390 for (tp = ipgre_bucket(ign, t);
391 (iter = rtnl_dereference(*tp)) != NULL;
394 rcu_assign_pointer(*tp, t->next);
400 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
401 struct ip_tunnel_parm *parms,
404 __be32 remote = parms->iph.daddr;
405 __be32 local = parms->iph.saddr;
406 __be32 key = parms->i_key;
407 int link = parms->link;
409 struct ip_tunnel __rcu **tp;
410 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
412 for (tp = __ipgre_bucket(ign, parms);
413 (t = rtnl_dereference(*tp)) != NULL;
415 if (local == t->parms.iph.saddr &&
416 remote == t->parms.iph.daddr &&
417 key == t->parms.i_key &&
418 link == t->parms.link &&
419 type == t->dev->type)
425 static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
426 struct ip_tunnel_parm *parms, int create)
428 struct ip_tunnel *t, *nt;
429 struct net_device *dev;
431 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
433 t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
438 strlcpy(name, parms->name, IFNAMSIZ);
440 strcpy(name, "gre%d");
442 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
446 dev_net_set(dev, net);
448 nt = netdev_priv(dev);
450 dev->rtnl_link_ops = &ipgre_link_ops;
452 dev->mtu = ipgre_tunnel_bind_dev(dev);
454 if (register_netdevice(dev) < 0)
457 /* Can use a lockless transmit, unless we generate output sequences */
458 if (!(nt->parms.o_flags & GRE_SEQ))
459 dev->features |= NETIF_F_LLTX;
462 ipgre_tunnel_link(ign, nt);
470 static void ipgre_tunnel_uninit(struct net_device *dev)
472 struct net *net = dev_net(dev);
473 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
475 ipgre_tunnel_unlink(ign, netdev_priv(dev));
480 static void ipgre_err(struct sk_buff *skb, u32 info)
483 /* All the routers (except for Linux) return only
484 8 bytes of packet payload. It means that precise relaying of
485 ICMP in the real Internet is absolutely infeasible.
487 Moreover, Cisco "wise men" put the GRE key in the third word
488 of the GRE header. This makes it impossible to maintain even soft state for keyed
489 GRE tunnels with checksums enabled. Tell them "thank you".
491 Well, I wonder, rfc1812 was written by a Cisco employee, so
492 why the hell do these idiots break standards established by themselves?
496 const struct iphdr *iph = (const struct iphdr *)skb->data;
497 __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
498 int grehlen = (iph->ihl<<2) + 4;
499 const int type = icmp_hdr(skb)->type;
500 const int code = icmp_hdr(skb)->code;
506 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
507 if (flags&(GRE_VERSION|GRE_ROUTING))
516 /* If only 8 bytes were returned, keyed messages will be dropped here */
517 if (skb_headlen(skb) < grehlen)
521 key = *(((__be32 *)p) + (grehlen / 4) - 1);
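/*
 * Worked example (illustrative): with both GRE_CSUM and GRE_KEY set,
 * grehlen works out to (iph->ihl << 2) + 12, so the key read above is the
 * third 32-bit word of the GRE header.  A router that quotes only 8 bytes
 * of the offending datagram therefore never returns the key, which is why
 * keyed messages are dropped by the skb_headlen() check above.
 */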
525 case ICMP_PARAMETERPROB:
528 case ICMP_DEST_UNREACH:
531 case ICMP_PORT_UNREACH:
532 /* Impossible event. */
535 /* All others are translated to HOST_UNREACH.
536 rfc2003 contains "deep thoughts" about NET_UNREACH;
537 I believe they are just ether pollution. --ANK
542 case ICMP_TIME_EXCEEDED:
543 if (code != ICMP_EXC_TTL)
551 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
557 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
558 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
559 t->parms.link, 0, IPPROTO_GRE, 0);
562 if (type == ICMP_REDIRECT) {
563 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
567 if (t->parms.iph.daddr == 0 ||
568 ipv4_is_multicast(t->parms.iph.daddr))
571 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
574 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
578 t->err_time = jiffies;
582 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
585 if (skb->protocol == htons(ETH_P_IP))
586 inner = old_iph->tos;
587 else if (skb->protocol == htons(ETH_P_IPV6))
588 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
589 return INET_ECN_encapsulate(tos, inner);
592 static int ipgre_rcv(struct sk_buff *skb)
594 const struct iphdr *iph;
600 struct ip_tunnel *tunnel;
605 if (!pskb_may_pull(skb, 16))
610 flags = *(__be16 *)h;
612 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
613 /* - Version must be 0.
614 - We do not support routing headers.
616 if (flags&(GRE_VERSION|GRE_ROUTING))
619 if (flags&GRE_CSUM) {
620 switch (skb->ip_summed) {
621 case CHECKSUM_COMPLETE:
622 csum = csum_fold(skb->csum);
628 csum = __skb_checksum_complete(skb);
629 skb->ip_summed = CHECKSUM_COMPLETE;
634 key = *(__be32 *)(h + offset);
638 seqno = ntohl(*(__be32 *)(h + offset));
643 gre_proto = *(__be16 *)(h + 2);
645 tunnel = ipgre_tunnel_lookup(skb->dev,
646 iph->saddr, iph->daddr, flags, key,
649 struct pcpu_tstats *tstats;
653 skb->protocol = gre_proto;
654 /* WCCP version 1 and 2 protocol decoding.
655 * - Change protocol to IP
656 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
658 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
659 skb->protocol = htons(ETH_P_IP);
660 if ((*(h + offset) & 0xF0) != 0x40)
664 skb->mac_header = skb->network_header;
665 __pskb_pull(skb, offset);
666 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
667 skb->pkt_type = PACKET_HOST;
668 #ifdef CONFIG_NET_IPGRE_BROADCAST
669 if (ipv4_is_multicast(iph->daddr)) {
670 /* Looped back packet, drop it! */
671 if (rt_is_output_route(skb_rtable(skb)))
673 tunnel->dev->stats.multicast++;
674 skb->pkt_type = PACKET_BROADCAST;
678 if (((flags&GRE_CSUM) && csum) ||
679 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
680 tunnel->dev->stats.rx_crc_errors++;
681 tunnel->dev->stats.rx_errors++;
684 if (tunnel->parms.i_flags&GRE_SEQ) {
685 if (!(flags&GRE_SEQ) ||
686 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
687 tunnel->dev->stats.rx_fifo_errors++;
688 tunnel->dev->stats.rx_errors++;
691 tunnel->i_seqno = seqno + 1;
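/*
 * Illustrative note: the (s32) cast above implements serial number
 * arithmetic, so the in-order check keeps working across wraparound.
 * For example, with i_seqno == 0xfffffff0 an incoming seqno of 0x00000005
 * still counts as newer, because the signed difference is positive.
 */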
694 /* Warning: All skb pointers will be invalidated! */
695 if (tunnel->dev->type == ARPHRD_ETHER) {
696 if (!pskb_may_pull(skb, ETH_HLEN)) {
697 tunnel->dev->stats.rx_length_errors++;
698 tunnel->dev->stats.rx_errors++;
703 skb->protocol = eth_type_trans(skb, tunnel->dev);
704 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
707 __skb_tunnel_rx(skb, tunnel->dev);
709 skb_reset_network_header(skb);
710 err = IP_ECN_decapsulate(iph, skb);
713 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
714 &iph->saddr, iph->tos);
716 ++tunnel->dev->stats.rx_frame_errors;
717 ++tunnel->dev->stats.rx_errors;
722 tstats = this_cpu_ptr(tunnel->dev->tstats);
723 u64_stats_update_begin(&tstats->syncp);
724 tstats->rx_packets++;
725 tstats->rx_bytes += skb->len;
726 u64_stats_update_end(&tstats->syncp);
728 gro_cells_receive(&tunnel->gro_cells, skb);
731 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
738 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
740 struct ip_tunnel *tunnel = netdev_priv(dev);
741 const struct iphdr *old_iph = ip_hdr(skb);
742 const struct iphdr *tiph;
746 struct rtable *rt; /* Route to the other host */
747 struct net_device *tdev; /* Device to other host */
748 struct iphdr *iph; /* Our new IP header */
749 unsigned int max_headroom; /* The extra header space needed */
754 if (skb->ip_summed == CHECKSUM_PARTIAL &&
755 skb_checksum_help(skb))
758 if (dev->type == ARPHRD_ETHER)
759 IPCB(skb)->flags = 0;
761 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
763 tiph = (const struct iphdr *)skb->data;
765 gre_hlen = tunnel->hlen;
766 tiph = &tunnel->parms.iph;
769 if ((dst = tiph->daddr) == 0) {
772 if (skb_dst(skb) == NULL) {
773 dev->stats.tx_fifo_errors++;
777 if (skb->protocol == htons(ETH_P_IP)) {
778 rt = skb_rtable(skb);
779 dst = rt_nexthop(rt, old_iph->daddr);
781 #if IS_ENABLED(CONFIG_IPV6)
782 else if (skb->protocol == htons(ETH_P_IPV6)) {
783 const struct in6_addr *addr6;
784 struct neighbour *neigh;
785 bool do_tx_error_icmp;
788 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
792 addr6 = (const struct in6_addr *)&neigh->primary_key;
793 addr_type = ipv6_addr_type(addr6);
795 if (addr_type == IPV6_ADDR_ANY) {
796 addr6 = &ipv6_hdr(skb)->daddr;
797 addr_type = ipv6_addr_type(addr6);
800 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
801 do_tx_error_icmp = true;
803 do_tx_error_icmp = false;
804 dst = addr6->s6_addr32[3];
806 neigh_release(neigh);
807 if (do_tx_error_icmp)
818 if (skb->protocol == htons(ETH_P_IP))
820 else if (skb->protocol == htons(ETH_P_IPV6))
821 tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
824 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
825 tunnel->parms.o_key, RT_TOS(tos),
828 dev->stats.tx_carrier_errors++;
835 dev->stats.collisions++;
841 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
843 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
846 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
848 if (skb->protocol == htons(ETH_P_IP)) {
849 df |= (old_iph->frag_off&htons(IP_DF));
851 if ((old_iph->frag_off&htons(IP_DF)) &&
852 mtu < ntohs(old_iph->tot_len)) {
853 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
858 #if IS_ENABLED(CONFIG_IPV6)
859 else if (skb->protocol == htons(ETH_P_IPV6)) {
860 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
862 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
863 if ((tunnel->parms.iph.daddr &&
864 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
865 rt6->rt6i_dst.plen == 128) {
866 rt6->rt6i_flags |= RTF_MODIFIED;
867 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
871 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
872 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
879 if (tunnel->err_count > 0) {
880 if (time_before(jiffies,
881 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
884 dst_link_failure(skb);
886 tunnel->err_count = 0;
889 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
891 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
892 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
893 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
894 if (max_headroom > dev->needed_headroom)
895 dev->needed_headroom = max_headroom;
898 dev->stats.tx_dropped++;
903 skb_set_owner_w(new_skb, skb->sk);
906 old_iph = ip_hdr(skb);
909 skb_reset_transport_header(skb);
910 skb_push(skb, gre_hlen);
911 skb_reset_network_header(skb);
912 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
913 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
916 skb_dst_set(skb, &rt->dst);
919 * Push down and install the outer IP header.
924 iph->ihl = sizeof(struct iphdr) >> 2;
926 iph->protocol = IPPROTO_GRE;
927 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
928 iph->daddr = fl4.daddr;
929 iph->saddr = fl4.saddr;
931 if ((iph->ttl = tiph->ttl) == 0) {
932 if (skb->protocol == htons(ETH_P_IP))
933 iph->ttl = old_iph->ttl;
934 #if IS_ENABLED(CONFIG_IPV6)
935 else if (skb->protocol == htons(ETH_P_IPV6))
936 iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
939 iph->ttl = ip4_dst_hoplimit(&rt->dst);
942 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
943 ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
944 htons(ETH_P_TEB) : skb->protocol;
946 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
947 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
949 if (tunnel->parms.o_flags&GRE_SEQ) {
951 *ptr = htonl(tunnel->o_seqno);
954 if (tunnel->parms.o_flags&GRE_KEY) {
955 *ptr = tunnel->parms.o_key;
958 if (tunnel->parms.o_flags&GRE_CSUM) {
960 *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
964 iptunnel_xmit(skb, dev);
967 #if IS_ENABLED(CONFIG_IPV6)
969 dst_link_failure(skb);
972 dev->stats.tx_errors++;
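/*
 * Illustrative layout sketch (assumption: standard RFC 2784/2890 GRE):
 * ipgre_tunnel_xmit() above writes the base header as two 16-bit words
 * (flags, protocol) right after the outer IP header, then fills the
 * optional words from the back so that, when present, they end up in the
 * order checksum, key, sequence number.  A hypothetical struct view:
 */
#if 0
struct gre_hdr_example {
	__be16	flags;			/* GRE_CSUM | GRE_KEY | GRE_SEQ ...    */
	__be16	protocol;		/* ETH_P_TEB for gretap, else inner    */
	/* optional fields follow, each present only if its flag is set:
	 *	__sum16	csum;  __be16 reserved;
	 *	__be32	key;
	 *	__be32	seq;
	 */
};
#endif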
977 static int ipgre_tunnel_bind_dev(struct net_device *dev)
979 struct net_device *tdev = NULL;
980 struct ip_tunnel *tunnel;
981 const struct iphdr *iph;
982 int hlen = LL_MAX_HEADER;
983 int mtu = ETH_DATA_LEN;
984 int addend = sizeof(struct iphdr) + 4;
986 tunnel = netdev_priv(dev);
987 iph = &tunnel->parms.iph;
989 /* Guess the output device to choose a reasonable mtu and needed_headroom */
995 rt = ip_route_output_gre(dev_net(dev), &fl4,
996 iph->daddr, iph->saddr,
1005 if (dev->type != ARPHRD_ETHER)
1006 dev->flags |= IFF_POINTOPOINT;
1009 if (!tdev && tunnel->parms.link)
1010 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
1013 hlen = tdev->hard_header_len + tdev->needed_headroom;
1016 dev->iflink = tunnel->parms.link;
1018 /* Precalculate GRE options length */
1019 if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1020 if (tunnel->parms.o_flags&GRE_CSUM)
1022 if (tunnel->parms.o_flags&GRE_KEY)
1024 if (tunnel->parms.o_flags&GRE_SEQ)
1027 dev->needed_headroom = addend + hlen;
1028 mtu -= dev->hard_header_len + addend;
1033 tunnel->hlen = addend;
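/*
 * Worked example (illustrative): with GRE_CSUM, GRE_KEY and GRE_SEQ all
 * enabled, addend is 20 (outer IP) + 4 (base GRE) + 4 + 4 + 4 = 36 bytes,
 * so an underlying mtu of ETH_DATA_LEN (1500) leaves 1500 - 36 = 1464,
 * before dev->hard_header_len is also subtracted above.
 */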
1039 ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1042 struct ip_tunnel_parm p;
1043 struct ip_tunnel *t;
1044 struct net *net = dev_net(dev);
1045 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1050 if (dev == ign->fb_tunnel_dev) {
1051 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1055 t = ipgre_tunnel_locate(net, &p, 0);
1058 t = netdev_priv(dev);
1059 memcpy(&p, &t->parms, sizeof(p));
1060 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1067 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1071 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1075 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1076 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1077 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1080 p.iph.frag_off |= htons(IP_DF);
1082 if (!(p.i_flags&GRE_KEY))
1084 if (!(p.o_flags&GRE_KEY))
1087 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
1089 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1091 if (t->dev != dev) {
1096 unsigned int nflags = 0;
1098 t = netdev_priv(dev);
1100 if (ipv4_is_multicast(p.iph.daddr))
1101 nflags = IFF_BROADCAST;
1102 else if (p.iph.daddr)
1103 nflags = IFF_POINTOPOINT;
1105 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1109 ipgre_tunnel_unlink(ign, t);
1111 t->parms.iph.saddr = p.iph.saddr;
1112 t->parms.iph.daddr = p.iph.daddr;
1113 t->parms.i_key = p.i_key;
1114 t->parms.o_key = p.o_key;
1115 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1116 memcpy(dev->broadcast, &p.iph.daddr, 4);
1117 ipgre_tunnel_link(ign, t);
1118 netdev_state_change(dev);
1124 if (cmd == SIOCCHGTUNNEL) {
1125 t->parms.iph.ttl = p.iph.ttl;
1126 t->parms.iph.tos = p.iph.tos;
1127 t->parms.iph.frag_off = p.iph.frag_off;
1128 if (t->parms.link != p.link) {
1129 t->parms.link = p.link;
1130 dev->mtu = ipgre_tunnel_bind_dev(dev);
1131 netdev_state_change(dev);
1134 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1137 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1142 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1145 if (dev == ign->fb_tunnel_dev) {
1147 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1150 if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1153 if (t == netdev_priv(ign->fb_tunnel_dev))
1157 unregister_netdevice(dev);
1169 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1171 struct ip_tunnel *tunnel = netdev_priv(dev);
1173 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1179 /* Nice toy. Unfortunately, useless in real life :-)
1180 It allows one to construct a virtual multiprotocol broadcast "LAN"
1181 over the Internet, provided multicast routing is tuned.
1184 I have no idea whether this bicycle was invented before me,
1185 so I had to set ARPHRD_IPGRE to a random value.
1186 I have the impression that Cisco could have made something similar,
1187 but this feature is apparently missing in IOS<=11.2(8).
1187 but this feature is apparently missing in IOS<=11.2(8).
1189 I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1190 with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1192 ping -t 255 224.66.66.66
1194 If nobody answers, mbone does not work.
1196 ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1197 ip addr add 10.66.66.<somewhat>/24 dev Universe
1198 ifconfig Universe up
1199 ifconfig Universe add fe80::<Your_real_addr>/10
1200 ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1203 ftp fec0:6666:6666::193.233.7.65
1208 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1209 unsigned short type,
1210 const void *daddr, const void *saddr, unsigned int len)
1212 struct ip_tunnel *t = netdev_priv(dev);
1213 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1214 __be16 *p = (__be16 *)(iph+1);
1216 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1217 p[0] = t->parms.o_flags;
1221 * Set the source hardware address.
1225 memcpy(&iph->saddr, saddr, 4);
1227 memcpy(&iph->daddr, daddr, 4);
1234 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1236 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1237 memcpy(haddr, &iph->saddr, 4);
1241 static const struct header_ops ipgre_header_ops = {
1242 .create = ipgre_header,
1243 .parse = ipgre_header_parse,
1246 #ifdef CONFIG_NET_IPGRE_BROADCAST
1247 static int ipgre_open(struct net_device *dev)
1249 struct ip_tunnel *t = netdev_priv(dev);
1251 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1255 rt = ip_route_output_gre(dev_net(dev), &fl4,
1259 RT_TOS(t->parms.iph.tos),
1262 return -EADDRNOTAVAIL;
1265 if (__in_dev_get_rtnl(dev) == NULL)
1266 return -EADDRNOTAVAIL;
1267 t->mlink = dev->ifindex;
1268 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1273 static int ipgre_close(struct net_device *dev)
1275 struct ip_tunnel *t = netdev_priv(dev);
1277 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1278 struct in_device *in_dev;
1279 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1281 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1288 static const struct net_device_ops ipgre_netdev_ops = {
1289 .ndo_init = ipgre_tunnel_init,
1290 .ndo_uninit = ipgre_tunnel_uninit,
1291 #ifdef CONFIG_NET_IPGRE_BROADCAST
1292 .ndo_open = ipgre_open,
1293 .ndo_stop = ipgre_close,
1295 .ndo_start_xmit = ipgre_tunnel_xmit,
1296 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1297 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1298 .ndo_get_stats64 = ipgre_get_stats64,
1301 static void ipgre_dev_free(struct net_device *dev)
1303 struct ip_tunnel *tunnel = netdev_priv(dev);
1305 gro_cells_destroy(&tunnel->gro_cells);
1306 free_percpu(dev->tstats);
1310 #define GRE_FEATURES (NETIF_F_SG | \
1311 NETIF_F_FRAGLIST | \
1315 static void ipgre_tunnel_setup(struct net_device *dev)
1317 dev->netdev_ops = &ipgre_netdev_ops;
1318 dev->destructor = ipgre_dev_free;
1320 dev->type = ARPHRD_IPGRE;
1321 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1322 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1323 dev->flags = IFF_NOARP;
1326 dev->features |= NETIF_F_NETNS_LOCAL;
1327 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1329 dev->features |= GRE_FEATURES;
1330 dev->hw_features |= GRE_FEATURES;
1333 static int ipgre_tunnel_init(struct net_device *dev)
1335 struct ip_tunnel *tunnel;
1339 tunnel = netdev_priv(dev);
1340 iph = &tunnel->parms.iph;
1343 strcpy(tunnel->parms.name, dev->name);
1345 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1346 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1349 #ifdef CONFIG_NET_IPGRE_BROADCAST
1350 if (ipv4_is_multicast(iph->daddr)) {
1353 dev->flags = IFF_BROADCAST;
1354 dev->header_ops = &ipgre_header_ops;
1358 dev->header_ops = &ipgre_header_ops;
1360 dev->tstats = alloc_percpu(struct pcpu_tstats);
1364 err = gro_cells_init(&tunnel->gro_cells, dev);
1366 free_percpu(dev->tstats);
1373 static void ipgre_fb_tunnel_init(struct net_device *dev)
1375 struct ip_tunnel *tunnel = netdev_priv(dev);
1376 struct iphdr *iph = &tunnel->parms.iph;
1379 strcpy(tunnel->parms.name, dev->name);
1382 iph->protocol = IPPROTO_GRE;
1384 tunnel->hlen = sizeof(struct iphdr) + 4;
1390 static const struct gre_protocol ipgre_protocol = {
1391 .handler = ipgre_rcv,
1392 .err_handler = ipgre_err,
1395 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1399 for (prio = 0; prio < 4; prio++) {
1401 for (h = 0; h < HASH_SIZE; h++) {
1402 struct ip_tunnel *t;
1404 t = rtnl_dereference(ign->tunnels[prio][h]);
1407 unregister_netdevice_queue(t->dev, head);
1408 t = rtnl_dereference(t->next);
1414 static int __net_init ipgre_init_net(struct net *net)
1416 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1419 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1420 ipgre_tunnel_setup);
1421 if (!ign->fb_tunnel_dev) {
1425 dev_net_set(ign->fb_tunnel_dev, net);
1427 ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1428 ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1430 if ((err = register_netdev(ign->fb_tunnel_dev)))
1433 rcu_assign_pointer(ign->tunnels_wc[0],
1434 netdev_priv(ign->fb_tunnel_dev));
1438 ipgre_dev_free(ign->fb_tunnel_dev);
1443 static void __net_exit ipgre_exit_net(struct net *net)
1445 struct ipgre_net *ign;
1448 ign = net_generic(net, ipgre_net_id);
1450 ipgre_destroy_tunnels(ign, &list);
1451 unregister_netdevice_many(&list);
1455 static struct pernet_operations ipgre_net_ops = {
1456 .init = ipgre_init_net,
1457 .exit = ipgre_exit_net,
1458 .id = &ipgre_net_id,
1459 .size = sizeof(struct ipgre_net),
1462 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1470 if (data[IFLA_GRE_IFLAGS])
1471 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1472 if (data[IFLA_GRE_OFLAGS])
1473 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1474 if (flags & (GRE_VERSION|GRE_ROUTING))
1480 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1484 if (tb[IFLA_ADDRESS]) {
1485 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1487 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1488 return -EADDRNOTAVAIL;
1494 if (data[IFLA_GRE_REMOTE]) {
1495 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1501 return ipgre_tunnel_validate(tb, data);
1504 static void ipgre_netlink_parms(struct nlattr *data[],
1505 struct ip_tunnel_parm *parms)
1507 memset(parms, 0, sizeof(*parms));
1509 parms->iph.protocol = IPPROTO_GRE;
1514 if (data[IFLA_GRE_LINK])
1515 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1517 if (data[IFLA_GRE_IFLAGS])
1518 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1520 if (data[IFLA_GRE_OFLAGS])
1521 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1523 if (data[IFLA_GRE_IKEY])
1524 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1526 if (data[IFLA_GRE_OKEY])
1527 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1529 if (data[IFLA_GRE_LOCAL])
1530 parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1532 if (data[IFLA_GRE_REMOTE])
1533 parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1535 if (data[IFLA_GRE_TTL])
1536 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1538 if (data[IFLA_GRE_TOS])
1539 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1541 if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1542 parms->iph.frag_off = htons(IP_DF);
1545 static int ipgre_tap_init(struct net_device *dev)
1547 struct ip_tunnel *tunnel;
1549 tunnel = netdev_priv(dev);
1552 strcpy(tunnel->parms.name, dev->name);
1554 ipgre_tunnel_bind_dev(dev);
1556 dev->tstats = alloc_percpu(struct pcpu_tstats);
1563 static const struct net_device_ops ipgre_tap_netdev_ops = {
1564 .ndo_init = ipgre_tap_init,
1565 .ndo_uninit = ipgre_tunnel_uninit,
1566 .ndo_start_xmit = ipgre_tunnel_xmit,
1567 .ndo_set_mac_address = eth_mac_addr,
1568 .ndo_validate_addr = eth_validate_addr,
1569 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1570 .ndo_get_stats64 = ipgre_get_stats64,
1573 static void ipgre_tap_setup(struct net_device *dev)
1578 dev->netdev_ops = &ipgre_tap_netdev_ops;
1579 dev->destructor = ipgre_dev_free;
1582 dev->features |= NETIF_F_NETNS_LOCAL;
1585 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
1586 struct nlattr *data[])
1588 struct ip_tunnel *nt;
1589 struct net *net = dev_net(dev);
1590 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1594 nt = netdev_priv(dev);
1595 ipgre_netlink_parms(data, &nt->parms);
1597 if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1600 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1601 eth_hw_addr_random(dev);
1603 mtu = ipgre_tunnel_bind_dev(dev);
1607 /* Can use a lockless transmit, unless we generate output sequences */
1608 if (!(nt->parms.o_flags & GRE_SEQ))
1609 dev->features |= NETIF_F_LLTX;
1611 err = register_netdevice(dev);
1616 ipgre_tunnel_link(ign, nt);
1622 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1623 struct nlattr *data[])
1625 struct ip_tunnel *t, *nt;
1626 struct net *net = dev_net(dev);
1627 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1628 struct ip_tunnel_parm p;
1631 if (dev == ign->fb_tunnel_dev)
1634 nt = netdev_priv(dev);
1635 ipgre_netlink_parms(data, &p);
1637 t = ipgre_tunnel_locate(net, &p, 0);
1645 if (dev->type != ARPHRD_ETHER) {
1646 unsigned int nflags = 0;
1648 if (ipv4_is_multicast(p.iph.daddr))
1649 nflags = IFF_BROADCAST;
1650 else if (p.iph.daddr)
1651 nflags = IFF_POINTOPOINT;
1653 if ((dev->flags ^ nflags) &
1654 (IFF_POINTOPOINT | IFF_BROADCAST))
1658 ipgre_tunnel_unlink(ign, t);
1659 t->parms.iph.saddr = p.iph.saddr;
1660 t->parms.iph.daddr = p.iph.daddr;
1661 t->parms.i_key = p.i_key;
1662 if (dev->type != ARPHRD_ETHER) {
1663 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1664 memcpy(dev->broadcast, &p.iph.daddr, 4);
1666 ipgre_tunnel_link(ign, t);
1667 netdev_state_change(dev);
1670 t->parms.o_key = p.o_key;
1671 t->parms.iph.ttl = p.iph.ttl;
1672 t->parms.iph.tos = p.iph.tos;
1673 t->parms.iph.frag_off = p.iph.frag_off;
1675 if (t->parms.link != p.link) {
1676 t->parms.link = p.link;
1677 mtu = ipgre_tunnel_bind_dev(dev);
1680 netdev_state_change(dev);
1686 static size_t ipgre_get_size(const struct net_device *dev)
1691 /* IFLA_GRE_IFLAGS */
1693 /* IFLA_GRE_OFLAGS */
1699 /* IFLA_GRE_LOCAL */
1701 /* IFLA_GRE_REMOTE */
1707 /* IFLA_GRE_PMTUDISC */
1712 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1714 struct ip_tunnel *t = netdev_priv(dev);
1715 struct ip_tunnel_parm *p = &t->parms;
1717 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1718 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1719 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1720 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1721 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1722 nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1723 nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1724 nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1725 nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1726 nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1727 !!(p->iph.frag_off & htons(IP_DF))))
1728 goto nla_put_failure;
1735 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
1736 [IFLA_GRE_LINK] = { .type = NLA_U32 },
1737 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
1738 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
1739 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
1740 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
1741 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1742 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1743 [IFLA_GRE_TTL] = { .type = NLA_U8 },
1744 [IFLA_GRE_TOS] = { .type = NLA_U8 },
1745 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
1748 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1750 .maxtype = IFLA_GRE_MAX,
1751 .policy = ipgre_policy,
1752 .priv_size = sizeof(struct ip_tunnel),
1753 .setup = ipgre_tunnel_setup,
1754 .validate = ipgre_tunnel_validate,
1755 .newlink = ipgre_newlink,
1756 .changelink = ipgre_changelink,
1757 .get_size = ipgre_get_size,
1758 .fill_info = ipgre_fill_info,
1761 static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1763 .maxtype = IFLA_GRE_MAX,
1764 .policy = ipgre_policy,
1765 .priv_size = sizeof(struct ip_tunnel),
1766 .setup = ipgre_tap_setup,
1767 .validate = ipgre_tap_validate,
1768 .newlink = ipgre_newlink,
1769 .changelink = ipgre_changelink,
1770 .get_size = ipgre_get_size,
1771 .fill_info = ipgre_fill_info,
1775 * And now the module code and kernel interface.
1778 static int __init ipgre_init(void)
1782 pr_info("GRE over IPv4 tunneling driver\n");
1784 err = register_pernet_device(&ipgre_net_ops);
1788 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1790 pr_info("%s: can't add protocol\n", __func__);
1791 goto add_proto_failed;
1794 err = rtnl_link_register(&ipgre_link_ops);
1796 goto rtnl_link_failed;
1798 err = rtnl_link_register(&ipgre_tap_ops);
1800 goto tap_ops_failed;
1806 rtnl_link_unregister(&ipgre_link_ops);
1808 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1810 unregister_pernet_device(&ipgre_net_ops);
1814 static void __exit ipgre_fini(void)
1816 rtnl_link_unregister(&ipgre_tap_ops);
1817 rtnl_link_unregister(&ipgre_link_ops);
1818 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1819 pr_info("%s: can't remove protocol\n", __func__);
1820 unregister_pernet_device(&ipgre_net_ops);
1823 module_init(ipgre_init);
1824 module_exit(ipgre_fini);
1825 MODULE_LICENSE("GPL");
1826 MODULE_ALIAS_RTNL_LINK("gre");
1827 MODULE_ALIAS_RTNL_LINK("gretap");
1828 MODULE_ALIAS_NETDEV("gre0");