/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION "0.1"

#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
#define FDB_HASH_BITS 8
#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */

#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
/* VLAN + IP header + UDP + VXLAN */
#define VXLAN_HEADROOM (4 + 20 + 8 + 8)

#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
        __be32 vx_flags;
        __be32 vx_vni;
};

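/* The VXLAN header is 8 bytes: a flags word whose only valid value has
 * bit 3 set (VXLAN_FLAGS above, the "I" flag marking a valid VNI),
 * followed by the 24-bit VNI in the top three bytes of vx_vni.  The low
 * byte of vx_vni and all other flag bits are reserved and must be zero,
 * which is why receive validates them and extracts the VNI with a
 * ">> 8" after ntohl().
 */
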
/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

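/* 8472 is the historical Linux default, chosen before IANA acted on the
 * TODO above; IANA later assigned UDP port 4789 to VXLAN, so deployments
 * that mix implementations may need to override udp_port at load time.
 */
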
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
        struct socket     *sock;        /* UDP encap socket */
        struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
        struct hlist_node hlist;        /* linked list of entries */
        struct rcu_head   rcu;
        unsigned long     updated;      /* jiffies */
        unsigned long     used;
        __be32            remote_ip;
        u16               state;        /* see ndm_state */
        u8                eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        u64                     tx_packets;
        u64                     tx_bytes;
        struct u64_stats_sync   syncp;
};

/* Pseudo network device */
struct vxlan_dev {
        struct hlist_node hlist;
        struct net_device *dev;
        struct vxlan_stats __percpu *stats;
        __u32             vni;          /* virtual network id */
        __be32            gaddr;        /* multicast group */
        __be32            saddr;        /* source address */
        unsigned int      link;         /* link to multicast over */
        __u16             port_min;     /* source port range */
        __u16             port_max;
        __u8              tos;          /* TOS override */
        __u8              ttl;
        bool              learn;

        unsigned long     age_interval;
        struct timer_list age_timer;
        spinlock_t        hash_lock;
        unsigned int      addrcnt;
        unsigned int      addrmax;
        unsigned int      addrexceeded;

        struct hlist_head fdb_head[FDB_HASH_SIZE];
};

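/* Lookup is two-level: a received packet is first matched to a vxlan_dev
 * through the per-namespace VNI hash table (vni_list), then the inner
 * destination MAC is resolved to a remote IP through the per-device
 * forwarding table (fdb_head), falling back to the multicast group when
 * the address is unknown.
 */
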
/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);

        return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
        struct vxlan_dev *vxlan;
        struct hlist_node *node;

        hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
                if (vxlan->vni == id)
                        return vxlan;
        }

        return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
                          const struct vxlan_fdb *fdb,
                          u32 portid, u32 seq, int type, unsigned int flags)
{
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        memset(ndm, 0, sizeof(*ndm));
        ndm->ndm_family = AF_BRIDGE;
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = NTF_SELF;
        ndm->ndm_type = NDA_DST;

        if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;

        if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
                goto nla_put_failure;

        ci.ndm_used      = jiffies_to_clock_t(now - fdb->used);
        ci.ndm_confirmed = 0;
        ci.ndm_updated   = jiffies_to_clock_t(now - fdb->updated);
        ci.ndm_refcnt    = 0;

        if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ndmsg))
                + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
                + nla_total_size(sizeof(__be32)) /* NDA_DST */
                + nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
                             const struct vxlan_fdb *fdb, int type)
{
        struct net *net = dev_net(vxlan->dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
        if (skb == NULL)
                goto errout;

        err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
        u64 value = get_unaligned((u64 *)addr);

        /* only want 6 bytes */
#ifdef __BIG_ENDIAN
        value >>= 16;
#else
        value <<= 16;
#endif
        return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
                                                const u8 *mac)
{
        return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
                                        const u8 *mac)
{
        struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
        struct vxlan_fdb *f;
        struct hlist_node *node;

        hlist_for_each_entry_rcu(f, node, head, hlist) {
                if (compare_ether_addr(mac, f->eth_addr) == 0)
                        return f;
        }

        return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            const u8 *mac, __be32 ip,
                            __u16 state, __u16 flags)
{
        struct vxlan_fdb *f;
        int notify = 0;

        f = vxlan_find_mac(vxlan, mac);
        if (f) {
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
                                   "lost race to create %pM\n", mac);
                        return -EEXIST;
                }
                if (f->state != state) {
                        f->state = state;
                        f->updated = jiffies;
                        notify = 1;
                }
        } else {
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;

                if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
                        return -ENOSPC;

                netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
                f = kmalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return -ENOMEM;

                notify = 1;
                f->remote_ip = ip;
                f->state = state;
                f->updated = f->used = jiffies;
                memcpy(f->eth_addr, mac, ETH_ALEN);

                ++vxlan->addrcnt;
                hlist_add_head_rcu(&f->hlist,
                                   vxlan_fdb_head(vxlan, mac));
        }

        if (notify)
                vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

        return 0;
}

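/* The NLM_F_* semantics mirror rtnetlink neighbour updates: NLM_F_EXCL
 * fails if the entry already exists and NLM_F_CREATE is required to add
 * a new one.  vxlan_snoop() passes both flags, so learning never
 * overwrites an entry that appeared between lookup and insert.
 */
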
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
        netdev_dbg(vxlan->dev,
                   "delete %pM\n", f->eth_addr);

        --vxlan->addrcnt;
        vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

        hlist_del_rcu(&f->hlist);
        kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                         struct net_device *dev,
                         const unsigned char *addr, u16 flags)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        __be32 ip;
        int err;

        if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
                pr_info("RTM_NEWNEIGH with invalid state %#x\n",
                        ndm->ndm_state);
                return -EINVAL;
        }

        if (tb[NDA_DST] == NULL)
                return -EINVAL;

        if (nla_len(tb[NDA_DST]) != sizeof(__be32))
                return -EAFNOSUPPORT;

        ip = nla_get_be32(tb[NDA_DST]);

        spin_lock_bh(&vxlan->hash_lock);
        err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
        spin_unlock_bh(&vxlan->hash_lock);

        return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
                            const unsigned char *addr)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
        int err = -ENOENT;

        spin_lock_bh(&vxlan->hash_lock);
        f = vxlan_find_mac(vxlan, addr);
        if (f) {
                vxlan_fdb_destroy(vxlan, f);
                err = 0;
        }
        spin_unlock_bh(&vxlan->hash_lock);

        return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                          struct net_device *dev, int idx)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;

        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct vxlan_fdb *f;
                struct hlist_node *n;
                int err;

                hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
                        if (idx < cb->args[0])
                                goto skip;

                        err = vxlan_fdb_info(skb, vxlan, f,
                                             NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq,
                                             RTM_NEWNEIGH,
                                             NLM_F_MULTI);
                        if (err < 0)
                                break;
skip:
                        ++idx;
                }
        }

        return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
                        __be32 src_ip, const u8 *src_mac)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
        int err;

        f = vxlan_find_mac(vxlan, src_mac);
        if (likely(f)) {
                f->used = jiffies;
                if (likely(f->remote_ip == src_ip))
                        return;

                if (net_ratelimit())
                        netdev_notice(dev,
                                      "%pM migrated from %pI4 to %pI4\n",
                                      src_mac, &f->remote_ip, &src_ip);

                f->remote_ip = src_ip;
                f->updated = jiffies;
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
                err = vxlan_fdb_create(vxlan, src_mac, src_ip,
                                       NUD_REACHABLE,
                                       NLM_F_EXCL|NLM_F_CREATE);
                spin_unlock(&vxlan->hash_lock);
        }
}

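/* This is the same source-learning model as an 802.1D bridge, except
 * that the learned "port" is a remote tunnel endpoint address; station
 * moves show up as the rate-limited "migrated" message above.
 */
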
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
                             const struct vxlan_dev *this)
{
        const struct vxlan_dev *vxlan;
        struct hlist_node *node;
        unsigned h;

        for (h = 0; h < VNI_HASH_SIZE; ++h)
                hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
                        if (vxlan == this)
                                continue;

                        if (!netif_running(vxlan->dev))
                                continue;

                        if (vxlan->gaddr == this->gaddr)
                                return true;
                }

        return false;
}

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr = vxlan->gaddr,
        };
        int err;

        /* Already a member of group */
        if (vxlan_group_used(vn, vxlan))
                return 0;

        /* Need to drop RTNL to call multicast join */
        rtnl_unlock();
        lock_sock(sk);
        err = ip_mc_join_group(sk, &mreq);
        release_sock(sk);
        rtnl_lock();

        return err;
}

/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        int err = 0;
        struct sock *sk = vn->sock->sk;
        struct ip_mreqn mreq = {
                .imr_multiaddr.s_addr = vxlan->gaddr,
        };

        /* Only leave group when last vxlan is done. */
        if (vxlan_group_used(vn, vxlan))
                return 0;

        /* Need to drop RTNL to call multicast leave */
        rtnl_unlock();
        lock_sock(sk);
        err = ip_mc_leave_group(sk, &mreq);
        release_sock(sk);
        rtnl_lock();

        return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct iphdr *oip;
        struct vxlanhdr *vxh;
        struct vxlan_dev *vxlan;
        struct vxlan_stats *stats;
        __u32 vni;
        int err;

        /* pop off outer UDP header */
        __skb_pull(skb, sizeof(struct udphdr));

        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
                goto error;

        /* Drop packets with reserved bits set */
        vxh = (struct vxlanhdr *) skb->data;
        if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
            (vxh->vx_vni & htonl(0xff))) {
                netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
                           ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
                goto error;
        }

        __skb_pull(skb, sizeof(struct vxlanhdr));
        skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));

        /* Is this VNI defined? */
        vni = ntohl(vxh->vx_vni) >> 8;
        vxlan = vxlan_find_vni(sock_net(sk), vni);
        if (!vxlan) {
                netdev_dbg(skb->dev, "unknown vni %d\n", vni);
                goto drop;
        }

        if (!pskb_may_pull(skb, ETH_HLEN)) {
                vxlan->dev->stats.rx_length_errors++;
                vxlan->dev->stats.rx_errors++;
                goto drop;
        }

        /* Re-examine inner Ethernet packet */
        oip = ip_hdr(skb);
        skb->protocol = eth_type_trans(skb, vxlan->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

        /* Ignore packet loops (and multicast echo) */
        if (compare_ether_addr(eth_hdr(skb)->h_source,
                               vxlan->dev->dev_addr) == 0)
                goto drop;

        if (vxlan->learn)
                vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

        __skb_tunnel_rx(skb, vxlan->dev);
        skb_reset_network_header(skb);

        err = IP_ECN_decapsulate(oip, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &oip->saddr, oip->tos);
                if (err > 1) {
                        ++vxlan->dev->stats.rx_frame_errors;
                        ++vxlan->dev->stats.rx_errors;
                        goto drop;
                }
        }

        stats = this_cpu_ptr(vxlan->stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);

        netif_rx(skb);

        return 0;
error:
        /* Put UDP header back */
        __skb_push(skb, sizeof(struct udphdr));

        return 1;
drop:
        /* Consume bad packet */
        kfree_skb(skb);
        return 0;
}

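/* Contract with the UDP core: an encap_rcv handler returns 0 once it
 * has consumed the skb, and >0 to hand the packet back to regular UDP
 * socket processing, which is why the error path above restores the UDP
 * header it pulled before returning 1.
 */
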
/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
                                   const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return iph->tos;
        else if (skb->protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
        else
                return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
                                 const struct iphdr *iph,
                                 const struct sk_buff *skb)
{
        u8 inner = vxlan_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}

static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
{
        const struct ethhdr *eth = (struct ethhdr *) skb->data;
        const struct vxlan_fdb *f;

        if (is_multicast_ether_addr(eth->h_dest))
                return vxlan->gaddr;

        f = vxlan_find_mac(vxlan, eth->h_dest);
        if (f)
                return f->remote_ip;
        else
                return vxlan->gaddr;
}

static void vxlan_sock_free(struct sk_buff *skb)
{
        sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct sock *sk = vn->sock->sk;

        skb_orphan(skb);
        sock_hold(sk);
        skb->sk = sk;
        skb->destructor = vxlan_sock_free;
}

/* Compute source port for outgoing packet
 *   first choice to use L4 flow hash since it will spread
 *     better and maybe available from hardware
 *   secondary choice is to use jhash on the Ethernet header
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
        unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
        u32 hash;

        hash = skb_get_rxhash(skb);
        if (!hash)
                hash = jhash(skb->data, 2 * ETH_ALEN,
                             (__force u32) skb->protocol);

        return (((u64) hash * range) >> 32) + vxlan->port_min;
}

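/* (((u64) hash * range) >> 32) maps the 32-bit hash uniformly onto
 * [0, range) without a divide: conceptually hash / 2^32 is a fraction
 * in [0, 1) that is scaled by the configured port range and offset by
 * port_min.
 */
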
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *   source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct rtable *rt;
        const struct iphdr *old_iph;
        struct iphdr *iph;
        struct vxlanhdr *vxh;
        struct udphdr *uh;
        struct flowi4 fl4;
        unsigned int pkt_len = skb->len;
        __be32 dst;
        __u16 src_port;
        __be16 df = 0;
        __u8 tos, ttl;
        int err;

        dst = vxlan_find_dst(vxlan, skb);
        if (!dst)
                goto drop;

        /* Need space for new headers (invalidates iph ptr) */
        if (skb_cow_head(skb, VXLAN_HEADROOM))
                goto drop;

        old_iph = ip_hdr(skb);

        ttl = vxlan->ttl;
        if (!ttl && IN_MULTICAST(ntohl(dst)))
                ttl = 1;

        tos = vxlan->tos;
        if (tos == 1)
                tos = vxlan_get_dsfield(old_iph, skb);

        src_port = vxlan_src_port(vxlan, skb);

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_oif = vxlan->link;
        fl4.flowi4_tos = RT_TOS(tos);
        fl4.daddr = dst;
        fl4.saddr = vxlan->saddr;

        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &dst);
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }

        if (rt->dst.dev == dev) {
                netdev_dbg(dev, "circular route to %pI4\n", &dst);
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);

        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_FLAGS);
        vxh->vx_vni = htonl(vxlan->vni << 8);

        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);
        uh = udp_hdr(skb);

        uh->dest = htons(vxlan_port);
        uh->source = htons(src_port);

        uh->len = htons(skb->len);
        uh->check = 0;

        __skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph           = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = IPPROTO_UDP;
        iph->tos      = vxlan_ecn_encap(tos, old_iph, skb);
        iph->daddr    = fl4.daddr;
        iph->saddr    = fl4.saddr;
        iph->ttl      = ttl ? : ip4_dst_hoplimit(&rt->dst);

        vxlan_set_owner(dev, skb);

        /* See __IPTUNNEL_XMIT */
        skb->ip_summed = CHECKSUM_NONE;
        ip_select_ident(iph, &rt->dst, NULL);

        err = ip_local_out(skb);
        if (likely(net_xmit_eval(err) == 0)) {
                struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += pkt_len;
                u64_stats_update_end(&stats->syncp);
        } else {
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
        }

        return NETDEV_TX_OK;

drop:
        dev->stats.tx_dropped++;
        goto tx_free;

tx_error:
        dev->stats.tx_errors++;
tx_free:
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

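/* Headers are pushed innermost-first (VXLAN, then UDP, then IP) because
 * __skb_push() prepends at the head of the buffer.  uh->check = 0 is
 * legal here: for IPv4, a transmitted UDP checksum of zero means "not
 * computed" (RFC 768).
 */
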
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
        struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
        unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
        unsigned int h;

        if (!netif_running(vxlan->dev))
                return;

        spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *p, *n;
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
                        unsigned long timeout;

                        if (f->state == NUD_PERMANENT)
                                continue;

                        timeout = f->used + vxlan->age_interval * HZ;
                        if (time_before_eq(timeout, jiffies)) {
                                netdev_dbg(vxlan->dev,
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
                                vxlan_fdb_destroy(vxlan, f);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);

        mod_timer(&vxlan->age_timer, next_timer);
}

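/* The timer is self-adjusting: rather than waking every FDB_AGE_INTERVAL
 * regardless, the next expiry is pulled forward to the oldest surviving
 * entry's deadline, so stale addresses are purged close to on time.
 */
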
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);

        vxlan->stats = alloc_percpu(struct vxlan_stats);
        if (!vxlan->stats)
                return -ENOMEM;

        return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        int err;

        if (vxlan->gaddr) {
                err = vxlan_join_group(dev);
                if (err)
                        return err;
        }

        if (vxlan->age_interval)
                mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

        return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
        unsigned int h;

        spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *p, *n;
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
                        vxlan_fdb_destroy(vxlan, f);
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);

        if (vxlan->gaddr)
                vxlan_leave_group(dev);

        del_timer_sync(&vxlan->age_timer);

        vxlan_flush(vxlan);

        return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
                                               struct rtnl_link_stats64 *stats)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_stats tmp, sum = { 0 };
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                const struct vxlan_stats *stats
                        = per_cpu_ptr(vxlan->stats, cpu);

                do {
                        start = u64_stats_fetch_begin_bh(&stats->syncp);
                        memcpy(&tmp, stats, sizeof(tmp));
                } while (u64_stats_fetch_retry_bh(&stats->syncp, start));

                sum.tx_bytes   += tmp.tx_bytes;
                sum.tx_packets += tmp.tx_packets;
                sum.rx_bytes   += tmp.rx_bytes;
                sum.rx_packets += tmp.rx_packets;
        }

        stats->tx_bytes   = sum.tx_bytes;
        stats->tx_packets = sum.tx_packets;
        stats->rx_bytes   = sum.rx_bytes;
        stats->rx_packets = sum.rx_packets;

        stats->multicast = dev->stats.multicast;
        stats->rx_length_errors = dev->stats.rx_length_errors;
        stats->rx_frame_errors = dev->stats.rx_frame_errors;
        stats->rx_errors = dev->stats.rx_errors;

        stats->tx_dropped = dev->stats.tx_dropped;
        stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
        stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
        stats->collisions = dev->stats.collisions;
        stats->tx_errors = dev->stats.tx_errors;

        return stats;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
        .ndo_init            = vxlan_init,
        .ndo_open            = vxlan_open,
        .ndo_stop            = vxlan_stop,
        .ndo_start_xmit      = vxlan_xmit,
        .ndo_get_stats64     = vxlan_stats64,
        .ndo_set_rx_mode     = vxlan_set_multicast_list,
        .ndo_change_mtu      = eth_change_mtu,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_fdb_add         = vxlan_fdb_add,
        .ndo_fdb_del         = vxlan_fdb_delete,
        .ndo_fdb_dump        = vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
        .name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);

        free_percpu(vxlan->stats);
        free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned h;
        int low, high;

        eth_hw_addr_random(dev);
        ether_setup(dev);
        dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

        dev->netdev_ops = &vxlan_netdev_ops;
        dev->destructor = vxlan_free;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);

        dev->tx_queue_len = 0;
        dev->features   |= NETIF_F_LLTX;
        dev->features   |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

        spin_lock_init(&vxlan->hash_lock);

        init_timer_deferrable(&vxlan->age_timer);
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;

        inet_get_local_port_range(&low, &high);
        vxlan->port_min = low;
        vxlan->port_max = high;

        vxlan->dev = dev;

        for (h = 0; h < FDB_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_ID]         = { .type = NLA_U32 },
        [IFLA_VXLAN_GROUP]      = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_VXLAN_LINK]       = { .type = NLA_U32 },
        [IFLA_VXLAN_LOCAL]      = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_VXLAN_TOS]        = { .type = NLA_U8 },
        [IFLA_VXLAN_TTL]        = { .type = NLA_U8 },
        [IFLA_VXLAN_LEARNING]   = { .type = NLA_U8 },
        [IFLA_VXLAN_AGEING]     = { .type = NLA_U32 },
        [IFLA_VXLAN_LIMIT]      = { .type = NLA_U32 },
        [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
};

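/* These attributes correspond to the "ip link add ... type vxlan"
 * options in iproute2; e.g. (assuming an iproute2 build with vxlan
 * support):
 *
 *   ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth0
 *
 * which sets IFLA_VXLAN_ID, IFLA_VXLAN_GROUP and IFLA_VXLAN_LINK.
 */
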
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
                        pr_debug("invalid link address (not ethernet)\n");
                        return -EINVAL;
                }

                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
                        pr_debug("invalid all zero ethernet address\n");
                        return -EADDRNOTAVAIL;
                }
        }

        if (!data)
                return -EINVAL;

        if (data[IFLA_VXLAN_ID]) {
                __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
                if (id >= VXLAN_VID_MASK)
                        return -ERANGE;
        }

        if (data[IFLA_VXLAN_GROUP]) {
                __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
                if (!IN_MULTICAST(ntohl(gaddr))) {
                        pr_debug("group address is not IPv4 multicast\n");
                        return -EADDRNOTAVAIL;
                }
        }

        if (data[IFLA_VXLAN_PORT_RANGE]) {
                const struct ifla_vxlan_port_range *p
                        = nla_data(data[IFLA_VXLAN_PORT_RANGE]);

                if (ntohs(p->high) < ntohs(p->low)) {
                        pr_debug("port range %u .. %u not valid\n",
                                 ntohs(p->low), ntohs(p->high));
                        return -EINVAL;
                }
        }

        return 0;
}

static int vxlan_newlink(struct net *net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
{
        struct vxlan_dev *vxlan = netdev_priv(dev);
        __u32 vni;
        int err;

        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;

        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        if (vxlan_find_vni(net, vni)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
        }
        vxlan->vni = vni;

        if (data[IFLA_VXLAN_GROUP])
                vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

        if (data[IFLA_VXLAN_LOCAL])
                vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

        if (data[IFLA_VXLAN_LINK]) {
                vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);

                if (!tb[IFLA_MTU]) {
                        struct net_device *lowerdev
                                = __dev_get_by_index(net, vxlan->link);

                        if (!lowerdev)
                                return -ENODEV;
                        dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
                }
        }

        if (data[IFLA_VXLAN_TOS])
                vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

        if (data[IFLA_VXLAN_TTL])
                vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

        if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
                vxlan->learn = true;

        if (data[IFLA_VXLAN_AGEING])
                vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
        else
                vxlan->age_interval = FDB_AGE_DEFAULT;

        if (data[IFLA_VXLAN_LIMIT])
                vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

        if (data[IFLA_VXLAN_PORT_RANGE]) {
                const struct ifla_vxlan_port_range *p
                        = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
                vxlan->port_min = ntohs(p->low);
                vxlan->port_max = ntohs(p->high);
        }

        err = register_netdevice(dev);
        if (!err)
                hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

        return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
        struct vxlan_dev *vxlan = netdev_priv(dev);

        hlist_del_rcu(&vxlan->hlist);

        unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(__u32)) +  /* IFLA_VXLAN_ID */
                nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
                nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
                0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        const struct vxlan_dev *vxlan = netdev_priv(dev);
        struct ifla_vxlan_port_range ports = {
                .low  = htons(vxlan->port_min),
                .high = htons(vxlan->port_max),
        };

        if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
                goto nla_put_failure;

        if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
                goto nla_put_failure;

        if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
                goto nla_put_failure;

        if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
                goto nla_put_failure;

        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
            nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
            nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
                goto nla_put_failure;

        if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .kind      = "vxlan",
        .maxtype   = IFLA_VXLAN_MAX,
        .policy    = vxlan_policy,
        .priv_size = sizeof(struct vxlan_dev),
        .setup     = vxlan_setup,
        .validate  = vxlan_validate,
        .newlink   = vxlan_newlink,
        .dellink   = vxlan_dellink,
        .get_size  = vxlan_get_size,
        .fill_info = vxlan_fill_info,
};

static __net_init int vxlan_init_net(struct net *net)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct sock *sk;
        struct sockaddr_in vxlan_addr = {
                .sin_family = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        int rc;
        unsigned h;

        /* Create UDP socket for encapsulation receive. */
        rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
        if (rc < 0) {
                pr_debug("UDP socket create failed\n");
                return rc;
        }
        /* Put in proper namespace */
        sk = vn->sock->sk;
        sk_change_net(sk, net);

        vxlan_addr.sin_port = htons(vxlan_port);

        rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
                         sizeof(vxlan_addr));
        if (rc < 0) {
                pr_debug("bind for UDP socket %pI4:%u (%d)\n",
                         &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
                sk_release_kernel(sk);
                vn->sock = NULL;
                return rc;
        }

        /* Disable multicast loopback */
        inet_sk(sk)->mc_loop = 0;

        /* Mark socket as an encapsulation socket. */
        udp_sk(sk)->encap_type = 1;
        udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
        udp_encap_enable();

        for (h = 0; h < VNI_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vn->vni_list[h]);

        return 0;
}

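/* One kernel UDP socket per network namespace serves every vxlan device
 * in that namespace; demultiplexing to the right device happens purely
 * on the VNI in vxlan_udp_encap_recv(), not on separate sockets.
 */
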
static __net_exit void vxlan_exit_net(struct net *net)
{
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);

        if (vn->sock)
                sk_release_kernel(vn->sock->sk);
}

static struct pernet_operations vxlan_net_ops = {
        .init = vxlan_init_net,
        .exit = vxlan_exit_net,
        .id   = &vxlan_net_id,
        .size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
        int rc;

        get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

        rc = register_pernet_device(&vxlan_net_ops);
        if (rc)
                goto out1;

        rc = rtnl_link_register(&vxlan_link_ops);
        if (rc)
                goto out2;

        return 0;

out2:
        unregister_pernet_device(&vxlan_net_ops);
out1:
        return rc;
}
module_init(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
        rtnl_link_unregister(&vxlan_link_ops);
        unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
MODULE_ALIAS_RTNL_LINK("vxlan");