2 * VXLAN: Virtual eXtensible Local Area Network
4 * Copyright (c) 2012-2013 Vyatta Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/module.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/skbuff.h>
22 #include <linux/rculist.h>
23 #include <linux/netdevice.h>
26 #include <linux/udp.h>
27 #include <linux/igmp.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_ether.h>
30 #include <linux/hash.h>
31 #include <linux/ethtool.h>
33 #include <net/ndisc.h>
35 #include <net/ip_tunnels.h>
38 #include <net/rtnetlink.h>
39 #include <net/route.h>
40 #include <net/dsfield.h>
41 #include <net/inet_ecn.h>
42 #include <net/net_namespace.h>
43 #include <net/netns/generic.h>
45 #define VXLAN_VERSION "0.1"
47 #define PORT_HASH_BITS 8
48 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
49 #define VNI_HASH_BITS 10
50 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
51 #define FDB_HASH_BITS 8
52 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
53 #define FDB_AGE_DEFAULT 300 /* 5 min */
54 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
56 #define VXLAN_N_VID (1u << 24)
57 #define VXLAN_VID_MASK (VXLAN_N_VID - 1)
58 /* IP header + UDP + VXLAN + Ethernet header */
59 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
61 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
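/*
 * Illustration (not part of the original source): the encapsulation that the
 * headroom and flag values above describe.  Each frame sent on the tunnel is
 * wrapped as
 *
 *     | IP (20) | UDP (8) | VXLAN (8) | inner Ethernet (14) | payload |
 *
 * which is the 50 bytes reserved by VXLAN_HEADROOM.  The 8-byte VXLAN header
 * carries vx_flags (which must equal VXLAN_FLAGS, i.e. only the I bit set)
 * and vx_vni, with the 24-bit VNI in the top 24 bits and the low byte
 * reserved, as used later in this file:
 *
 *     vxh->vx_flags = htonl(VXLAN_FLAGS);
 *     vxh->vx_vni   = htonl(vni << 8);          // VNI 42 -> 0x00002a00
 *     vni           = ntohl(vxh->vx_vni) >> 8;  // receive side
 */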
63 /* VXLAN protocol header */
69 /* UDP port for VXLAN traffic.
70 * The IANA assigned port is 4789, but the Linux default is 8472
71 * for compatibility with early adopters.
73 static unsigned short vxlan_port __read_mostly = 8472;
74 module_param_named(udp_port, vxlan_port, ushort, 0444);
75 MODULE_PARM_DESC(udp_port, "Destination UDP port");
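/*
 * Illustration (not part of the original source): the default destination
 * port can be overridden when loading the module, e.g.
 *
 *     modprobe vxlan udp_port=4789
 *
 * to use the IANA-assigned port instead of the historical 8472.  Individual
 * devices can still override this per link via IFLA_VXLAN_PORT (handled in
 * vxlan_newlink() below).
 */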
77 static bool log_ecn_error = true;
78 module_param(log_ecn_error, bool, 0644);
79 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
81 static int vxlan_net_id;
83 static const u8 all_zeros_mac[ETH_ALEN];
85 /* per UDP socket information */
87 struct hlist_node hlist;
89 struct work_struct del_work;
92 struct hlist_head vni_list[VNI_HASH_SIZE];
95 /* per-network namespace private data for this module */
97 struct list_head vxlan_list;
98 struct hlist_head sock_list[PORT_HASH_SIZE];
107 struct list_head list;
110 /* Forwarding table entry */
112 struct hlist_node hlist; /* linked list of entries */
114 unsigned long updated; /* jiffies */
116 struct list_head remotes;
117 u16 state; /* see ndm_state */
118 u8 flags; /* see ndm_flags */
119 u8 eth_addr[ETH_ALEN];
122 /* Pseudo network device */
124 struct hlist_node hlist; /* vni hash table */
125 struct list_head next; /* vxlan's per namespace list */
126 struct vxlan_sock *vn_sock; /* listening socket */
127 struct net_device *dev;
128 struct vxlan_rdst default_dst; /* default destination */
129 __be32 saddr; /* source address */
131 __u16 port_min; /* source port range */
133 __u8 tos; /* TOS override */
135 u32 flags; /* VXLAN_F_* below */
137 struct work_struct sock_work;
138 struct work_struct igmp_work;
140 unsigned long age_interval;
141 struct timer_list age_timer;
142 spinlock_t hash_lock;
143 unsigned int addrcnt;
144 unsigned int addrmax;
146 struct hlist_head fdb_head[FDB_HASH_SIZE];
149 #define VXLAN_F_LEARN 0x01
150 #define VXLAN_F_PROXY 0x02
151 #define VXLAN_F_RSC 0x04
152 #define VXLAN_F_L2MISS 0x08
153 #define VXLAN_F_L3MISS 0x10
155 /* salt for hash table */
156 static u32 vxlan_salt __read_mostly;
157 static struct workqueue_struct *vxlan_wq;
159 static void vxlan_sock_work(struct work_struct *work);
161 /* Virtual Network hash table head */
162 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
164 return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
167 /* Socket hash table head */
168 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
170 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
172 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
175 /* First remote destination for a forwarding entry.
176 * Guaranteed to be non-NULL because remotes are never deleted.
178 static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
180 return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
183 /* Find VXLAN socket based on network namespace and UDP port */
184 static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
186 struct vxlan_sock *vs;
188 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
189 if (inet_sk(vs->sock->sk)->inet_sport == port)
195 /* Look up VNI in a per net namespace table */
196 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
198 struct vxlan_sock *vs;
199 struct vxlan_dev *vxlan;
201 vs = vxlan_find_port(net, port);
205 hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
206 if (vxlan->default_dst.remote_vni == id)
213 /* Fill in neighbour message in skbuff. */
214 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
215 const struct vxlan_fdb *fdb,
216 u32 portid, u32 seq, int type, unsigned int flags,
217 const struct vxlan_rdst *rdst)
219 unsigned long now = jiffies;
220 struct nda_cacheinfo ci;
221 struct nlmsghdr *nlh;
223 bool send_ip, send_eth;
225 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
229 ndm = nlmsg_data(nlh);
230 memset(ndm, 0, sizeof(*ndm));
232 send_eth = send_ip = true;
234 if (type == RTM_GETNEIGH) {
235 ndm->ndm_family = AF_INET;
236 send_ip = rdst->remote_ip != htonl(INADDR_ANY);
237 send_eth = !is_zero_ether_addr(fdb->eth_addr);
239 ndm->ndm_family = AF_BRIDGE;
240 ndm->ndm_state = fdb->state;
241 ndm->ndm_ifindex = vxlan->dev->ifindex;
242 ndm->ndm_flags = fdb->flags;
243 ndm->ndm_type = NDA_DST;
245 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
246 goto nla_put_failure;
248 if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
249 goto nla_put_failure;
251 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
252 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
253 goto nla_put_failure;
254 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
255 nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
256 goto nla_put_failure;
257 if (rdst->remote_ifindex &&
258 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
259 goto nla_put_failure;
261 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
262 ci.ndm_confirmed = 0;
263 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
266 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
267 goto nla_put_failure;
269 return nlmsg_end(skb, nlh);
272 nlmsg_cancel(skb, nlh);
276 static inline size_t vxlan_nlmsg_size(void)
278 return NLMSG_ALIGN(sizeof(struct ndmsg))
279 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
280 + nla_total_size(sizeof(__be32)) /* NDA_DST */
281 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
282 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
283 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
284 + nla_total_size(sizeof(struct nda_cacheinfo));
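/*
 * Illustration (not part of the original source): assuming the usual 4-byte
 * netlink attribute header and 4-byte alignment, the worst-case message body
 * sized by this helper works out to
 *
 *     12 (ndmsg) + 12 (NDA_LLADDR) + 8 (NDA_DST) + 8 (NDA_PORT)
 *        + 8 (NDA_VNI) + 8 (NDA_IFINDEX) + 20 (NDA_CACHEINFO) = 76 bytes
 *
 * which is why the WARN_ON(err == -EMSGSIZE) in vxlan_fdb_notify() should
 * never fire for a buffer allocated with this size.
 */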
287 static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
288 struct vxlan_fdb *fdb, int type)
290 struct net *net = dev_net(vxlan->dev);
294 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
298 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
300 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
301 WARN_ON(err == -EMSGSIZE);
306 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
310 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
313 static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
315 struct vxlan_dev *vxlan = netdev_priv(dev);
316 struct vxlan_fdb f = {
319 struct vxlan_rdst remote = {
320 .remote_ip = ipa, /* goes to NDA_DST */
321 .remote_vni = VXLAN_N_VID,
324 INIT_LIST_HEAD(&f.remotes);
325 list_add_rcu(&remote.list, &f.remotes);
327 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
330 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
332 struct vxlan_fdb f = {
336 INIT_LIST_HEAD(&f.remotes);
337 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
339 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
342 /* Hash Ethernet address */
343 static u32 eth_hash(const unsigned char *addr)
345 u64 value = get_unaligned((u64 *)addr);
347 /* only want 6 bytes */
353 return hash_64(value, FDB_HASH_BITS);
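/*
 * Illustration (not part of the original source): eth_hash() loads eight
 * bytes starting at the six-byte MAC address; the lines elided above discard
 * the extra two bytes so only the address contributes.  For example, for
 * MAC 00:11:22:33:44:55 on a little-endian machine the kept value is
 * 0x554433221100, and hash_64(value, FDB_HASH_BITS) folds it into one of
 * the FDB_HASH_SIZE (256) buckets returned by vxlan_fdb_head().
 */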
356 /* Hash chain to use for a given MAC address */
357 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
360 return &vxlan->fdb_head[eth_hash(mac)];
363 /* Look up Ethernet address in forwarding table */
364 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
368 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
371 hlist_for_each_entry_rcu(f, head, hlist) {
372 if (compare_ether_addr(mac, f->eth_addr) == 0)
379 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
384 f = __vxlan_find_mac(vxlan, mac);
391 /* Add/update destinations for multicast */
392 static int vxlan_fdb_append(struct vxlan_fdb *f,
393 __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
395 struct vxlan_rdst *rd;
397 /* protected by vxlan->hash_lock */
398 list_for_each_entry(rd, &f->remotes, list) {
399 if (rd->remote_ip == ip &&
400 rd->remote_port == port &&
401 rd->remote_vni == vni &&
402 rd->remote_ifindex == ifindex)
406 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
410 rd->remote_port = port;
411 rd->remote_vni = vni;
412 rd->remote_ifindex = ifindex;
414 list_add_tail_rcu(&rd->list, &f->remotes);
419 /* Add new entry to forwarding table -- assumes lock held */
420 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
421 const u8 *mac, __be32 ip,
422 __u16 state, __u16 flags,
423 __be16 port, __u32 vni, __u32 ifindex,
429 f = __vxlan_find_mac(vxlan, mac);
431 if (flags & NLM_F_EXCL) {
432 netdev_dbg(vxlan->dev,
433 "lost race to create %pM\n", mac);
436 if (f->state != state) {
438 f->updated = jiffies;
441 if (f->flags != ndm_flags) {
442 f->flags = ndm_flags;
443 f->updated = jiffies;
446 if ((flags & NLM_F_APPEND) &&
447 is_multicast_ether_addr(f->eth_addr)) {
448 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
455 if (!(flags & NLM_F_CREATE))
458 if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
461 netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
462 f = kmalloc(sizeof(*f), GFP_ATOMIC);
468 f->flags = ndm_flags;
469 f->updated = f->used = jiffies;
470 INIT_LIST_HEAD(&f->remotes);
471 memcpy(f->eth_addr, mac, ETH_ALEN);
473 vxlan_fdb_append(f, ip, port, vni, ifindex);
476 hlist_add_head_rcu(&f->hlist,
477 vxlan_fdb_head(vxlan, mac));
481 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
486 static void vxlan_fdb_free(struct rcu_head *head)
488 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
489 struct vxlan_rdst *rd, *nd;
491 list_for_each_entry_safe(rd, nd, &f->remotes, list)
496 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
498 netdev_dbg(vxlan->dev,
499 "delete %pM\n", f->eth_addr);
502 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
504 hlist_del_rcu(&f->hlist);
505 call_rcu(&f->rcu, vxlan_fdb_free);
508 /* Add static entry (via netlink) */
509 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
510 struct net_device *dev,
511 const unsigned char *addr, u16 flags)
513 struct vxlan_dev *vxlan = netdev_priv(dev);
514 struct net *net = dev_net(vxlan->dev);
520 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
521 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
526 if (tb[NDA_DST] == NULL)
529 if (nla_len(tb[NDA_DST]) != sizeof(__be32))
530 return -EAFNOSUPPORT;
532 ip = nla_get_be32(tb[NDA_DST]);
535 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
537 port = nla_get_be16(tb[NDA_PORT]);
539 port = vxlan->dst_port;
542 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
544 vni = nla_get_u32(tb[NDA_VNI]);
546 vni = vxlan->default_dst.remote_vni;
548 if (tb[NDA_IFINDEX]) {
549 struct net_device *tdev;
551 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
553 ifindex = nla_get_u32(tb[NDA_IFINDEX]);
554 tdev = dev_get_by_index(net, ifindex);
556 return -EADDRNOTAVAIL;
561 spin_lock_bh(&vxlan->hash_lock);
562 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
563 port, vni, ifindex, ndm->ndm_flags);
564 spin_unlock_bh(&vxlan->hash_lock);
569 /* Delete entry (via netlink) */
570 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
571 struct net_device *dev,
572 const unsigned char *addr)
574 struct vxlan_dev *vxlan = netdev_priv(dev);
578 spin_lock_bh(&vxlan->hash_lock);
579 f = vxlan_find_mac(vxlan, addr);
581 vxlan_fdb_destroy(vxlan, f);
584 spin_unlock_bh(&vxlan->hash_lock);
589 /* Dump forwarding table */
590 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
591 struct net_device *dev, int idx)
593 struct vxlan_dev *vxlan = netdev_priv(dev);
596 for (h = 0; h < FDB_HASH_SIZE; ++h) {
600 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
601 struct vxlan_rdst *rd;
603 if (idx < cb->args[0])
606 list_for_each_entry_rcu(rd, &f->remotes, list) {
607 err = vxlan_fdb_info(skb, vxlan, f,
608 NETLINK_CB(cb->skb).portid,
623 /* Watch incoming packets to learn the mapping between Ethernet address
624 * and tunnel endpoint.
625 * Return true if the packet is bogus and should be dropped.
627 static bool vxlan_snoop(struct net_device *dev,
628 __be32 src_ip, const u8 *src_mac)
630 struct vxlan_dev *vxlan = netdev_priv(dev);
633 f = vxlan_find_mac(vxlan, src_mac);
635 struct vxlan_rdst *rdst = first_remote(f);
637 if (likely(rdst->remote_ip == src_ip))
640 /* Don't migrate static entries, drop packets */
641 if (f->state & NUD_NOARP)
646 "%pM migrated from %pI4 to %pI4\n",
647 src_mac, &rdst->remote_ip, &src_ip);
649 rdst->remote_ip = src_ip;
650 f->updated = jiffies;
651 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
653 /* learned new entry */
654 spin_lock(&vxlan->hash_lock);
656 /* close off race between vxlan_flush and incoming packets */
657 if (netif_running(dev))
658 vxlan_fdb_create(vxlan, src_mac, src_ip,
660 NLM_F_EXCL|NLM_F_CREATE,
662 vxlan->default_dst.remote_vni,
664 spin_unlock(&vxlan->hash_lock);
671 /* See if multicast group is already in use by another ID */
672 static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
674 struct vxlan_dev *vxlan;
676 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
677 if (!netif_running(vxlan->dev))
680 if (vxlan->default_dst.remote_ip == remote_ip)
687 static void vxlan_sock_hold(struct vxlan_sock *vs)
689 atomic_inc(&vs->refcnt);
692 static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
694 if (!atomic_dec_and_test(&vs->refcnt))
697 spin_lock(&vn->sock_lock);
698 hlist_del_rcu(&vs->hlist);
699 spin_unlock(&vn->sock_lock);
701 queue_work(vxlan_wq, &vs->del_work);
704 /* Callback to update multicast group membership.
705 * Scheduled when vxlan goes up/down.
707 static void vxlan_igmp_work(struct work_struct *work)
709 struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
710 struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
711 struct vxlan_sock *vs = vxlan->vn_sock;
712 struct sock *sk = vs->sock->sk;
713 struct ip_mreqn mreq = {
714 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
715 .imr_ifindex = vxlan->default_dst.remote_ifindex,
719 if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
720 ip_mc_join_group(sk, &mreq);
722 ip_mc_leave_group(sk, &mreq);
725 vxlan_sock_release(vn, vs);
729 /* Callback from net/ipv4/udp.c to receive packets */
730 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
733 struct vxlanhdr *vxh;
734 struct vxlan_dev *vxlan;
735 struct pcpu_tstats *stats;
740 /* pop off outer UDP header */
741 __skb_pull(skb, sizeof(struct udphdr));
743 /* Need VXLAN and inner Ethernet header to be present */
744 if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
747 /* Drop packets with reserved bits set */
748 vxh = (struct vxlanhdr *) skb->data;
749 if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
750 (vxh->vx_vni & htonl(0xff))) {
751 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
752 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
756 __skb_pull(skb, sizeof(struct vxlanhdr));
758 /* Is this VNI defined? */
759 vni = ntohl(vxh->vx_vni) >> 8;
760 port = inet_sk(sk)->inet_sport;
761 vxlan = vxlan_find_vni(sock_net(sk), vni, port);
763 netdev_dbg(skb->dev, "unknown vni %d port %u\n",
768 if (!pskb_may_pull(skb, ETH_HLEN)) {
769 vxlan->dev->stats.rx_length_errors++;
770 vxlan->dev->stats.rx_errors++;
774 skb_reset_mac_header(skb);
776 /* Re-examine inner Ethernet packet */
778 skb->protocol = eth_type_trans(skb, vxlan->dev);
780 /* Ignore packet loops (and multicast echo) */
781 if (compare_ether_addr(eth_hdr(skb)->h_source,
782 vxlan->dev->dev_addr) == 0)
785 if ((vxlan->flags & VXLAN_F_LEARN) &&
786 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
789 __skb_tunnel_rx(skb, vxlan->dev);
790 skb_reset_network_header(skb);
792 /* If the NIC driver gave us an encapsulated packet with
793 * CHECKSUM_UNNECESSARY and the Rx checksum feature is enabled,
794 * keep CHECKSUM_UNNECESSARY; the device already checksummed it
795 * for us. Otherwise, force the upper layers to verify it.
797 if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
798 !(vxlan->dev->features & NETIF_F_RXCSUM))
799 skb->ip_summed = CHECKSUM_NONE;
801 skb->encapsulation = 0;
803 err = IP_ECN_decapsulate(oip, skb);
806 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
807 &oip->saddr, oip->tos);
809 ++vxlan->dev->stats.rx_frame_errors;
810 ++vxlan->dev->stats.rx_errors;
815 stats = this_cpu_ptr(vxlan->dev->tstats);
816 u64_stats_update_begin(&stats->syncp);
818 stats->rx_bytes += skb->len;
819 u64_stats_update_end(&stats->syncp);
825 /* Put UDP header back */
826 __skb_push(skb, sizeof(struct udphdr));
830 /* Consume bad packet */
835 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
837 struct vxlan_dev *vxlan = netdev_priv(dev);
843 if (dev->flags & IFF_NOARP)
846 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
847 dev->stats.tx_dropped++;
852 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
853 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
854 parp->ar_pro != htons(ETH_P_IP) ||
855 parp->ar_op != htons(ARPOP_REQUEST) ||
856 parp->ar_hln != dev->addr_len ||
859 arpptr = (u8 *)parp + sizeof(struct arphdr);
861 arpptr += dev->addr_len; /* sha */
862 memcpy(&sip, arpptr, sizeof(sip));
863 arpptr += sizeof(sip);
864 arpptr += dev->addr_len; /* tha */
865 memcpy(&tip, arpptr, sizeof(tip));
867 if (ipv4_is_loopback(tip) ||
868 ipv4_is_multicast(tip))
871 n = neigh_lookup(&arp_tbl, &tip, dev);
875 struct sk_buff *reply;
877 if (!(n->nud_state & NUD_CONNECTED)) {
882 f = vxlan_find_mac(vxlan, n->ha);
883 if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
884 /* bridge-local neighbor */
889 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
894 skb_reset_mac_header(reply);
895 __skb_pull(reply, skb_network_offset(reply));
896 reply->ip_summed = CHECKSUM_UNNECESSARY;
897 reply->pkt_type = PACKET_HOST;
899 if (netif_rx_ni(reply) == NET_RX_DROP)
900 dev->stats.rx_dropped++;
901 } else if (vxlan->flags & VXLAN_F_L3MISS)
902 vxlan_ip_miss(dev, tip);
908 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
910 struct vxlan_dev *vxlan = netdev_priv(dev);
914 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
918 switch (ntohs(eth_hdr(skb)->h_proto)) {
920 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
923 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
932 diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
934 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
936 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
940 } else if (vxlan->flags & VXLAN_F_L3MISS)
941 vxlan_ip_miss(dev, pip->daddr);
945 static void vxlan_sock_put(struct sk_buff *skb)
950 /* On transmit, associate with the tunnel socket */
951 static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
953 struct vxlan_dev *vxlan = netdev_priv(dev);
954 struct sock *sk = vxlan->vn_sock->sock->sk;
959 skb->destructor = vxlan_sock_put;
962 /* Compute the source port for an outgoing packet.
963 * First choice is the L4 flow hash, since it spreads better and may be
964 * available from hardware; the secondary choice is a jhash over the
965 * Ethernet header. (A worked example follows the function below.)
967 static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
969 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
972 hash = skb_get_rxhash(skb);
974 hash = jhash(skb->data, 2 * ETH_ALEN,
975 (__force u32) skb->protocol);
977 return htons((((u64) hash * range) >> 32) + vxlan->port_min);
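/*
 * Illustration (not part of the original source): how the expression above
 * maps a 32-bit hash into the configured source-port range.  Assuming the
 * default local port range 32768..61000:
 *
 *     range = 61000 - 32768 + 1 = 28233
 *     hash  = 0x80000000                    (exactly half of 2^32)
 *     ((u64)hash * range) >> 32 = 14116     (about half of the range)
 *     source port = 32768 + 14116 = 46884
 *
 * The multiply-and-shift scales the hash into [0, range) without a modulo,
 * so ports are spread roughly uniformly across [port_min, port_max].
 */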
980 static int handle_offloads(struct sk_buff *skb)
982 if (skb_is_gso(skb)) {
983 int err = skb_unclone(skb, GFP_ATOMIC);
987 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
988 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
989 skb->ip_summed = CHECKSUM_NONE;
994 /* Bypass encapsulation if the destination is local */
995 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
996 struct vxlan_dev *dst_vxlan)
998 struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
999 struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
1001 skb->pkt_type = PACKET_HOST;
1002 skb->encapsulation = 0;
1003 skb->dev = dst_vxlan->dev;
1004 __skb_pull(skb, skb_network_offset(skb));
1006 if (dst_vxlan->flags & VXLAN_F_LEARN)
1007 vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
1008 eth_hdr(skb)->h_source);
1010 u64_stats_update_begin(&tx_stats->syncp);
1011 tx_stats->tx_packets++;
1012 tx_stats->tx_bytes += skb->len;
1013 u64_stats_update_end(&tx_stats->syncp);
1015 if (netif_rx(skb) == NET_RX_SUCCESS) {
1016 u64_stats_update_begin(&rx_stats->syncp);
1017 rx_stats->rx_packets++;
1018 rx_stats->rx_bytes += skb->len;
1019 u64_stats_update_end(&rx_stats->syncp);
1021 skb->dev->stats.rx_dropped++;
1025 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1026 struct vxlan_rdst *rdst, bool did_rsc)
1028 struct vxlan_dev *vxlan = netdev_priv(dev);
1030 const struct iphdr *old_iph;
1031 struct vxlanhdr *vxh;
1035 __be16 src_port, dst_port;
1041 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
1042 vni = rdst->remote_vni;
1043 dst = rdst->remote_ip;
1047 /* short-circuited back to local bridge */
1048 vxlan_encap_bypass(skb, vxlan, vxlan);
1054 if (!skb->encapsulation) {
1055 skb_reset_inner_headers(skb);
1056 skb->encapsulation = 1;
1059 /* Need space for new headers (invalidates iph ptr) */
1060 if (skb_cow_head(skb, VXLAN_HEADROOM))
1063 old_iph = ip_hdr(skb);
1066 if (!ttl && IN_MULTICAST(ntohl(dst)))
1071 tos = ip_tunnel_get_dsfield(old_iph, skb);
1073 src_port = vxlan_src_port(vxlan, skb);
1075 memset(&fl4, 0, sizeof(fl4));
1076 fl4.flowi4_oif = rdst->remote_ifindex;
1077 fl4.flowi4_tos = RT_TOS(tos);
1079 fl4.saddr = vxlan->saddr;
1081 rt = ip_route_output_key(dev_net(dev), &fl4);
1083 netdev_dbg(dev, "no route to %pI4\n", &dst);
1084 dev->stats.tx_carrier_errors++;
1088 if (rt->dst.dev == dev) {
1089 netdev_dbg(dev, "circular route to %pI4\n", &dst);
1091 dev->stats.collisions++;
1095 /* Bypass encapsulation if the destination is local */
1096 if (rt->rt_flags & RTCF_LOCAL &&
1097 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1098 struct vxlan_dev *dst_vxlan;
1101 dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
1104 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1107 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1108 vxh->vx_flags = htonl(VXLAN_FLAGS);
1109 vxh->vx_vni = htonl(vni << 8);
1111 __skb_push(skb, sizeof(*uh));
1112 skb_reset_transport_header(skb);
1115 uh->dest = dst_port;
1116 uh->source = src_port;
1118 uh->len = htons(skb->len);
1121 vxlan_set_owner(dev, skb);
1123 if (handle_offloads(skb))
1126 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1127 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1129 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
1130 IPPROTO_UDP, tos, ttl, df);
1131 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
1136 dev->stats.tx_dropped++;
1140 dev->stats.tx_errors++;
1145 /* Transmit local packets over VXLAN.
1147 * Outer IP header inherits ECN and DF from inner header.
1148 * Outer UDP destination is the VXLAN device's configured port.
1149 * Source port is based on a hash of the flow.
1151 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1153 struct vxlan_dev *vxlan = netdev_priv(dev);
1155 bool did_rsc = false;
1156 struct vxlan_rdst *rdst;
1157 struct vxlan_fdb *f;
1159 skb_reset_mac_header(skb);
1162 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
1163 return arp_reduce(dev, skb);
1165 f = vxlan_find_mac(vxlan, eth->h_dest);
1168 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
1169 ntohs(eth->h_proto) == ETH_P_IP) {
1170 did_rsc = route_shortcircuit(dev, skb);
1172 f = vxlan_find_mac(vxlan, eth->h_dest);
1176 f = vxlan_find_mac(vxlan, all_zeros_mac);
1178 if ((vxlan->flags & VXLAN_F_L2MISS) &&
1179 !is_multicast_ether_addr(eth->h_dest))
1180 vxlan_fdb_miss(vxlan, eth->h_dest);
1182 dev->stats.tx_dropped++;
1184 return NETDEV_TX_OK;
1188 list_for_each_entry_rcu(rdst, &f->remotes, list) {
1189 struct sk_buff *skb1;
1191 skb1 = skb_clone(skb, GFP_ATOMIC);
1193 vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1197 return NETDEV_TX_OK;
1200 /* Walk the forwarding table and purge stale entries */
1201 static void vxlan_cleanup(unsigned long arg)
1203 struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
1204 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
1207 if (!netif_running(vxlan->dev))
1210 spin_lock_bh(&vxlan->hash_lock);
1211 for (h = 0; h < FDB_HASH_SIZE; ++h) {
1212 struct hlist_node *p, *n;
1213 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
1215 = container_of(p, struct vxlan_fdb, hlist);
1216 unsigned long timeout;
1218 if (f->state & NUD_PERMANENT)
1221 timeout = f->used + vxlan->age_interval * HZ;
1222 if (time_before_eq(timeout, jiffies)) {
1223 netdev_dbg(vxlan->dev,
1224 "garbage collect %pM\n",
1226 f->state = NUD_STALE;
1227 vxlan_fdb_destroy(vxlan, f);
1228 } else if (time_before(timeout, next_timer))
1229 next_timer = timeout;
1232 spin_unlock_bh(&vxlan->hash_lock);
1234 mod_timer(&vxlan->age_timer, next_timer);
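/*
 * Illustration (not part of the original source): with the default ageing
 * interval of FDB_AGE_DEFAULT (300 s), a learned entry last used at jiffies
 * J expires once jiffies passes J + 300 * HZ.  The scan itself runs every
 * FDB_AGE_INTERVAL (10 s), or sooner if some entry's computed timeout lands
 * before the next scheduled scan.
 */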
1237 /* Set up stats when the device is created */
1238 static int vxlan_init(struct net_device *dev)
1240 struct vxlan_dev *vxlan = netdev_priv(dev);
1241 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1242 struct vxlan_sock *vs;
1243 __u32 vni = vxlan->default_dst.remote_vni;
1245 dev->tstats = alloc_percpu(struct pcpu_tstats);
1249 spin_lock(&vn->sock_lock);
1250 vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
1252 /* If we already have a socket with the same port, reuse it */
1253 atomic_inc(&vs->refcnt);
1254 vxlan->vn_sock = vs;
1255 hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
1257 /* otherwise make new socket outside of RTNL */
1259 queue_work(vxlan_wq, &vxlan->sock_work);
1261 spin_unlock(&vn->sock_lock);
1266 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
1268 struct vxlan_fdb *f;
1270 spin_lock_bh(&vxlan->hash_lock);
1271 f = __vxlan_find_mac(vxlan, all_zeros_mac);
1273 vxlan_fdb_destroy(vxlan, f);
1274 spin_unlock_bh(&vxlan->hash_lock);
1277 static void vxlan_uninit(struct net_device *dev)
1279 struct vxlan_dev *vxlan = netdev_priv(dev);
1280 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
1281 struct vxlan_sock *vs = vxlan->vn_sock;
1283 vxlan_fdb_delete_default(vxlan);
1286 vxlan_sock_release(vn, vs);
1287 free_percpu(dev->tstats);
1290 /* Start ageing timer and join group when device is brought up */
1291 static int vxlan_open(struct net_device *dev)
1293 struct vxlan_dev *vxlan = netdev_priv(dev);
1294 struct vxlan_sock *vs = vxlan->vn_sock;
1296 /* socket hasn't been created */
1300 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
1301 vxlan_sock_hold(vs);
1303 queue_work(vxlan_wq, &vxlan->igmp_work);
1306 if (vxlan->age_interval)
1307 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
1312 /* Purge the forwarding table */
1313 static void vxlan_flush(struct vxlan_dev *vxlan)
1317 spin_lock_bh(&vxlan->hash_lock);
1318 for (h = 0; h < FDB_HASH_SIZE; ++h) {
1319 struct hlist_node *p, *n;
1320 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
1322 = container_of(p, struct vxlan_fdb, hlist);
1323 /* the all_zeros_mac entry is deleted at vxlan_uninit */
1324 if (!is_zero_ether_addr(f->eth_addr))
1325 vxlan_fdb_destroy(vxlan, f);
1328 spin_unlock_bh(&vxlan->hash_lock);
1331 /* Clean up the timer and forwarding table on shutdown */
1332 static int vxlan_stop(struct net_device *dev)
1334 struct vxlan_dev *vxlan = netdev_priv(dev);
1335 struct vxlan_sock *vs = vxlan->vn_sock;
1337 if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
1338 vxlan_sock_hold(vs);
1340 queue_work(vxlan_wq, &vxlan->igmp_work);
1343 del_timer_sync(&vxlan->age_timer);
1350 /* Stub, nothing needs to be done. */
1351 static void vxlan_set_multicast_list(struct net_device *dev)
1355 static const struct net_device_ops vxlan_netdev_ops = {
1356 .ndo_init = vxlan_init,
1357 .ndo_uninit = vxlan_uninit,
1358 .ndo_open = vxlan_open,
1359 .ndo_stop = vxlan_stop,
1360 .ndo_start_xmit = vxlan_xmit,
1361 .ndo_get_stats64 = ip_tunnel_get_stats64,
1362 .ndo_set_rx_mode = vxlan_set_multicast_list,
1363 .ndo_change_mtu = eth_change_mtu,
1364 .ndo_validate_addr = eth_validate_addr,
1365 .ndo_set_mac_address = eth_mac_addr,
1366 .ndo_fdb_add = vxlan_fdb_add,
1367 .ndo_fdb_del = vxlan_fdb_delete,
1368 .ndo_fdb_dump = vxlan_fdb_dump,
1371 /* Info for udev that this is a virtual tunnel endpoint */
1372 static struct device_type vxlan_type = {
1376 /* Initialize the device structure. */
1377 static void vxlan_setup(struct net_device *dev)
1379 struct vxlan_dev *vxlan = netdev_priv(dev);
1383 eth_hw_addr_random(dev);
1385 dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
1387 dev->netdev_ops = &vxlan_netdev_ops;
1388 dev->destructor = free_netdev;
1389 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
1391 dev->tx_queue_len = 0;
1392 dev->features |= NETIF_F_LLTX;
1393 dev->features |= NETIF_F_NETNS_LOCAL;
1394 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1395 dev->features |= NETIF_F_RXCSUM;
1396 dev->features |= NETIF_F_GSO_SOFTWARE;
1398 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1399 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1400 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1401 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1403 INIT_LIST_HEAD(&vxlan->next);
1404 spin_lock_init(&vxlan->hash_lock);
1405 INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
1406 INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
1408 init_timer_deferrable(&vxlan->age_timer);
1409 vxlan->age_timer.function = vxlan_cleanup;
1410 vxlan->age_timer.data = (unsigned long) vxlan;
1412 inet_get_local_port_range(&low, &high);
1413 vxlan->port_min = low;
1414 vxlan->port_max = high;
1415 vxlan->dst_port = htons(vxlan_port);
1419 for (h = 0; h < FDB_HASH_SIZE; ++h)
1420 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
1423 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
1424 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
1425 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
1426 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
1427 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
1428 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
1429 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
1430 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
1431 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
1432 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
1433 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
1434 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
1435 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
1436 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
1437 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
1438 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
1441 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
1443 if (tb[IFLA_ADDRESS]) {
1444 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
1445 pr_debug("invalid link address (not ethernet)\n");
1449 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
1450 pr_debug("invalid all zero ethernet address\n");
1451 return -EADDRNOTAVAIL;
1458 if (data[IFLA_VXLAN_ID]) {
1459 __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
1460 if (id >= VXLAN_VID_MASK)
1464 if (data[IFLA_VXLAN_PORT_RANGE]) {
1465 const struct ifla_vxlan_port_range *p
1466 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1468 if (ntohs(p->high) < ntohs(p->low)) {
1469 pr_debug("port range %u .. %u not valid\n",
1470 ntohs(p->low), ntohs(p->high));
1478 static void vxlan_get_drvinfo(struct net_device *netdev,
1479 struct ethtool_drvinfo *drvinfo)
1481 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
1482 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
1485 static const struct ethtool_ops vxlan_ethtool_ops = {
1486 .get_drvinfo = vxlan_get_drvinfo,
1487 .get_link = ethtool_op_get_link,
1490 static void vxlan_del_work(struct work_struct *work)
1492 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
1494 sk_release_kernel(vs->sock->sk);
1498 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
1500 struct vxlan_sock *vs;
1502 struct sockaddr_in vxlan_addr = {
1503 .sin_family = AF_INET,
1504 .sin_addr.s_addr = htonl(INADDR_ANY),
1510 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
1512 return ERR_PTR(-ENOMEM);
1514 for (h = 0; h < VNI_HASH_SIZE; ++h)
1515 INIT_HLIST_HEAD(&vs->vni_list[h]);
1517 INIT_WORK(&vs->del_work, vxlan_del_work);
1519 /* Create UDP socket for encapsulation receive. */
1520 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
1522 pr_debug("UDP socket create failed\n");
1527 /* Put in proper namespace */
1529 sk_change_net(sk, net);
1531 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
1532 sizeof(vxlan_addr));
1534 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1535 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
1536 sk_release_kernel(sk);
1541 /* Disable multicast loopback */
1542 inet_sk(sk)->mc_loop = 0;
1544 /* Mark socket as an encapsulation socket. */
1545 udp_sk(sk)->encap_type = 1;
1546 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1548 atomic_set(&vs->refcnt, 1);
1553 /* Scheduled at device creation to bind to a socket */
1554 static void vxlan_sock_work(struct work_struct *work)
1556 struct vxlan_dev *vxlan
1557 = container_of(work, struct vxlan_dev, sock_work);
1558 struct net_device *dev = vxlan->dev;
1559 struct net *net = dev_net(dev);
1560 __u32 vni = vxlan->default_dst.remote_vni;
1561 __be16 port = vxlan->dst_port;
1562 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1563 struct vxlan_sock *nvs, *ovs;
1565 nvs = vxlan_socket_create(net, port);
1567 netdev_err(vxlan->dev, "Cannot create UDP socket, %ld\n",
1572 spin_lock(&vn->sock_lock);
1573 /* Look again to see if we can reuse an existing socket */
1574 ovs = vxlan_find_port(net, port);
1576 atomic_inc(&ovs->refcnt);
1577 vxlan->vn_sock = ovs;
1578 hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
1579 spin_unlock(&vn->sock_lock);
1581 sk_release_kernel(nvs->sock->sk);
1584 vxlan->vn_sock = nvs;
1585 hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
1586 hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
1587 spin_unlock(&vn->sock_lock);
1593 static int vxlan_newlink(struct net *net, struct net_device *dev,
1594 struct nlattr *tb[], struct nlattr *data[])
1596 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1597 struct vxlan_dev *vxlan = netdev_priv(dev);
1598 struct vxlan_rdst *dst = &vxlan->default_dst;
1602 if (!data[IFLA_VXLAN_ID])
1605 vni = nla_get_u32(data[IFLA_VXLAN_ID]);
1606 dst->remote_vni = vni;
1608 if (data[IFLA_VXLAN_GROUP])
1609 dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1611 if (data[IFLA_VXLAN_LOCAL])
1612 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1614 if (data[IFLA_VXLAN_LINK] &&
1615 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
1616 struct net_device *lowerdev
1617 = __dev_get_by_index(net, dst->remote_ifindex);
1620 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
1625 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1627 /* update header length based on lower device */
1628 dev->hard_header_len = lowerdev->hard_header_len +
1632 if (data[IFLA_VXLAN_TOS])
1633 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
1635 if (data[IFLA_VXLAN_TTL])
1636 vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
1638 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
1639 vxlan->flags |= VXLAN_F_LEARN;
1641 if (data[IFLA_VXLAN_AGEING])
1642 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
1644 vxlan->age_interval = FDB_AGE_DEFAULT;
1646 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
1647 vxlan->flags |= VXLAN_F_PROXY;
1649 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
1650 vxlan->flags |= VXLAN_F_RSC;
1652 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
1653 vxlan->flags |= VXLAN_F_L2MISS;
1655 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
1656 vxlan->flags |= VXLAN_F_L3MISS;
1658 if (data[IFLA_VXLAN_LIMIT])
1659 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1661 if (data[IFLA_VXLAN_PORT_RANGE]) {
1662 const struct ifla_vxlan_port_range *p
1663 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
1664 vxlan->port_min = ntohs(p->low);
1665 vxlan->port_max = ntohs(p->high);
1668 if (data[IFLA_VXLAN_PORT])
1669 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
1671 if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
1672 pr_info("duplicate VNI %u\n", vni);
1676 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
1678 /* create an fdb entry for default destination */
1679 err = vxlan_fdb_create(vxlan, all_zeros_mac,
1680 vxlan->default_dst.remote_ip,
1681 NUD_REACHABLE|NUD_PERMANENT,
1682 NLM_F_EXCL|NLM_F_CREATE,
1683 vxlan->dst_port, vxlan->default_dst.remote_vni,
1684 vxlan->default_dst.remote_ifindex, NTF_SELF);
1688 err = register_netdevice(dev);
1690 vxlan_fdb_delete_default(vxlan);
1694 list_add(&vxlan->next, &vn->vxlan_list);
1699 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
1701 struct vxlan_dev *vxlan = netdev_priv(dev);
1703 hlist_del_rcu(&vxlan->hlist);
1704 list_del(&vxlan->next);
1705 unregister_netdevice_queue(dev, head);
1708 static size_t vxlan_get_size(const struct net_device *dev)
1711 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
1712 nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
1713 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
1714 nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
1715 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
1716 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
1717 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
1718 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
1719 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
1720 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
1721 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
1722 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1723 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1724 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
1725 nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */
1729 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1731 const struct vxlan_dev *vxlan = netdev_priv(dev);
1732 const struct vxlan_rdst *dst = &vxlan->default_dst;
1733 struct ifla_vxlan_port_range ports = {
1734 .low = htons(vxlan->port_min),
1735 .high = htons(vxlan->port_max),
1738 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
1739 goto nla_put_failure;
1741 if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
1742 goto nla_put_failure;
1744 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
1745 goto nla_put_failure;
1747 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
1748 goto nla_put_failure;
1750 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
1751 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
1752 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
1753 !!(vxlan->flags & VXLAN_F_LEARN)) ||
1754 nla_put_u8(skb, IFLA_VXLAN_PROXY,
1755 !!(vxlan->flags & VXLAN_F_PROXY)) ||
1756 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
1757 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
1758 !!(vxlan->flags & VXLAN_F_L2MISS)) ||
1759 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
1760 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
1761 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
1762 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
1763 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
1764 goto nla_put_failure;
1766 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
1767 goto nla_put_failure;
1775 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
1777 .maxtype = IFLA_VXLAN_MAX,
1778 .policy = vxlan_policy,
1779 .priv_size = sizeof(struct vxlan_dev),
1780 .setup = vxlan_setup,
1781 .validate = vxlan_validate,
1782 .newlink = vxlan_newlink,
1783 .dellink = vxlan_dellink,
1784 .get_size = vxlan_get_size,
1785 .fill_info = vxlan_fill_info,
1788 static __net_init int vxlan_init_net(struct net *net)
1790 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1793 INIT_LIST_HEAD(&vn->vxlan_list);
1794 spin_lock_init(&vn->sock_lock);
1796 for (h = 0; h < PORT_HASH_SIZE; ++h)
1797 INIT_HLIST_HEAD(&vn->sock_list[h]);
1802 static __net_exit void vxlan_exit_net(struct net *net)
1804 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1805 struct vxlan_dev *vxlan;
1808 list_for_each_entry(vxlan, &vn->vxlan_list, next)
1809 dev_close(vxlan->dev);
1813 static struct pernet_operations vxlan_net_ops = {
1814 .init = vxlan_init_net,
1815 .exit = vxlan_exit_net,
1816 .id = &vxlan_net_id,
1817 .size = sizeof(struct vxlan_net),
1820 static int __init vxlan_init_module(void)
1824 vxlan_wq = alloc_workqueue("vxlan", 0, 0);
1828 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
1830 rc = register_pernet_device(&vxlan_net_ops);
1834 rc = rtnl_link_register(&vxlan_link_ops);
1841 unregister_pernet_device(&vxlan_net_ops);
1843 destroy_workqueue(vxlan_wq);
1846 late_initcall(vxlan_init_module);
1848 static void __exit vxlan_cleanup_module(void)
1850 unregister_pernet_device(&vxlan_net_ops);
1851 rtnl_link_unregister(&vxlan_link_ops);
1852 destroy_workqueue(vxlan_wq);
1855 module_exit(vxlan_cleanup_module);
1857 MODULE_LICENSE("GPL");
1858 MODULE_VERSION(VXLAN_VERSION);
1859 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
1860 MODULE_ALIAS_RTNL_LINK("vxlan");