/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/slab.h>
17 #include <linux/udp.h>
18 #include <linux/igmp.h>
19 #include <linux/if_ether.h>
20 #include <linux/ethtool.h>
22 #include <net/ndisc.h>
25 #include <net/rtnetlink.h>
26 #include <net/inet_ecn.h>
27 #include <net/net_namespace.h>
28 #include <net/netns/generic.h>
29 #include <net/tun_proto.h>
30 #include <net/vxlan.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
37 #define VXLAN_VERSION "0.1"
39 #define PORT_HASH_BITS 8
40 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
41 #define FDB_AGE_DEFAULT 300 /* 5 min */
42 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
48 static unsigned short vxlan_port __read_mostly = 8472;
49 module_param_named(udp_port, vxlan_port, ushort, 0444);
50 MODULE_PARM_DESC(udp_port, "Destination UDP port");
52 static bool log_ecn_error = true;
53 module_param(log_ecn_error, bool, 0644);
54 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
56 static unsigned int vxlan_net_id;
57 static struct rtnl_link_ops vxlan_link_ops;
59 static const u8 all_zeros_mac[ETH_ALEN + 2];
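/* Illustrative sketch, not part of the driver: the all-zeros MAC above is
 * the key of the "default destination" FDB entry.  vxlan_xmit() falls back
 * to it when a unicast lookup misses, and vxlan_uninit() removes it via
 * vxlan_fdb_delete_default().  The helper name is hypothetical.
 */
static inline bool vxlan_example_is_default_fdb_key(const u8 *eth_addr)
{
	return is_zero_ether_addr(eth_addr);
}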
61 static int vxlan_sock_add(struct vxlan_dev *vxlan);
63 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	__be32		  vni;
	u8		  flags;	/* see ndm_flags */
};
85 /* salt for hash table */
86 static u32 vxlan_salt __read_mostly;
88 static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
90 return vs->flags & VXLAN_F_COLLECT_METADATA ||
91 ip_tunnel_collect_metadata();
94 #if IS_ENABLED(CONFIG_IPV6)
96 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
98 if (a->sa.sa_family != b->sa.sa_family)
100 if (a->sa.sa_family == AF_INET6)
101 return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
103 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
106 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
108 if (ipa->sa.sa_family == AF_INET6)
109 return ipv6_addr_any(&ipa->sin6.sin6_addr);
111 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
114 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
116 if (ipa->sa.sa_family == AF_INET6)
117 return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
119 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
122 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
124 if (nla_len(nla) >= sizeof(struct in6_addr)) {
125 ip->sin6.sin6_addr = nla_get_in6_addr(nla);
126 ip->sa.sa_family = AF_INET6;
128 } else if (nla_len(nla) >= sizeof(__be32)) {
129 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
130 ip->sa.sa_family = AF_INET;
133 return -EAFNOSUPPORT;
137 static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
138 const union vxlan_addr *ip)
140 if (ip->sa.sa_family == AF_INET6)
141 return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
143 return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
146 #else /* !CONFIG_IPV6 */
149 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
151 return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
154 static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
156 return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
159 static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
161 return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
164 static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
166 if (nla_len(nla) >= sizeof(struct in6_addr)) {
167 return -EAFNOSUPPORT;
168 } else if (nla_len(nla) >= sizeof(__be32)) {
169 ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
170 ip->sa.sa_family = AF_INET;
173 return -EAFNOSUPPORT;
static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
184 /* Virtual Network hash table head */
185 static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
187 return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
190 /* Socket hash table head */
191 static inline struct hlist_head *vs_head(struct net *net, __be16 port)
193 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
195 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
201 static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
203 return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
206 static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
208 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
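/* Illustrative sketch, not used by the driver: walking every remote
 * destination of an FDB entry under RCU, the same pattern vxlan_xmit()
 * uses when it floods a frame to all remotes.  The helper name is
 * hypothetical.
 */
static inline unsigned int vxlan_example_count_remotes(struct vxlan_fdb *fdb)
{
	struct vxlan_rdst *rd;
	unsigned int n = 0;

	list_for_each_entry_rcu(rd, &fdb->remotes, list)
		n++;
	return n;
}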
/* Find VXLAN socket based on network namespace, address family, UDP port
 * and enabled unshareable flags.
 */
214 static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
215 __be16 port, u32 flags)
217 struct vxlan_sock *vs;
219 flags &= VXLAN_F_RCV_FLAGS;
221 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
222 if (inet_sk(vs->sock->sk)->inet_sport == port &&
223 vxlan_get_sk_family(vs) == family &&
230 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
233 struct vxlan_dev_node *node;
	/* For flow-based devices, map all packets to VNI 0 */
236 if (vs->flags & VXLAN_F_COLLECT_METADATA)
239 hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
240 if (node->vxlan->default_dst.remote_vni != vni)
243 if (IS_ENABLED(CONFIG_IPV6)) {
244 const struct vxlan_config *cfg = &node->vxlan->cfg;
246 if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
247 cfg->remote_ifindex != ifindex)
257 /* Look up VNI in a per net namespace table */
258 static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
259 __be32 vni, sa_family_t family,
260 __be16 port, u32 flags)
262 struct vxlan_sock *vs;
264 vs = vxlan_find_sock(net, family, port, flags);
268 return vxlan_vs_find_vni(vs, ifindex, vni);
271 /* Fill in neighbour message in skbuff. */
272 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
273 const struct vxlan_fdb *fdb,
274 u32 portid, u32 seq, int type, unsigned int flags,
275 const struct vxlan_rdst *rdst)
277 unsigned long now = jiffies;
278 struct nda_cacheinfo ci;
279 struct nlmsghdr *nlh;
281 bool send_ip, send_eth;
283 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
287 ndm = nlmsg_data(nlh);
288 memset(ndm, 0, sizeof(*ndm));
290 send_eth = send_ip = true;
292 if (type == RTM_GETNEIGH) {
293 send_ip = !vxlan_addr_any(&rdst->remote_ip);
294 send_eth = !is_zero_ether_addr(fdb->eth_addr);
295 ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
297 ndm->ndm_family = AF_BRIDGE;
298 ndm->ndm_state = fdb->state;
299 ndm->ndm_ifindex = vxlan->dev->ifindex;
300 ndm->ndm_flags = fdb->flags;
301 ndm->ndm_type = RTN_UNICAST;
303 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
304 nla_put_s32(skb, NDA_LINK_NETNSID,
305 peernet2id(dev_net(vxlan->dev), vxlan->net)))
306 goto nla_put_failure;
308 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
309 goto nla_put_failure;
311 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
312 goto nla_put_failure;
314 if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
315 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
316 goto nla_put_failure;
317 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
318 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
319 goto nla_put_failure;
320 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
321 nla_put_u32(skb, NDA_SRC_VNI,
322 be32_to_cpu(fdb->vni)))
323 goto nla_put_failure;
324 if (rdst->remote_ifindex &&
325 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
326 goto nla_put_failure;
328 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
329 ci.ndm_confirmed = 0;
330 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
333 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
334 goto nla_put_failure;
340 nlmsg_cancel(skb, nlh);
344 static inline size_t vxlan_nlmsg_size(void)
346 return NLMSG_ALIGN(sizeof(struct ndmsg))
347 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
348 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
349 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
350 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
351 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
352 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
353 + nla_total_size(sizeof(struct nda_cacheinfo));
356 static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
357 struct vxlan_rdst *rd, int type)
359 struct net *net = dev_net(vxlan->dev);
363 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
367 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
369 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
370 WARN_ON(err == -EMSGSIZE);
375 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
379 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
382 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
384 struct vxlan_dev *vxlan = netdev_priv(dev);
385 struct vxlan_fdb f = {
388 struct vxlan_rdst remote = {
389 .remote_ip = *ipa, /* goes to NDA_DST */
390 .remote_vni = cpu_to_be32(VXLAN_N_VID),
393 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
396 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
398 struct vxlan_fdb f = {
401 struct vxlan_rdst remote = { };
403 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
405 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
408 /* Hash Ethernet address */
409 static u32 eth_hash(const unsigned char *addr)
411 u64 value = get_unaligned((u64 *)addr);
413 /* only want 6 bytes */
419 return hash_64(value, FDB_HASH_BITS);
422 static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
424 /* use 1 byte of OUI and 3 bytes of NIC */
425 u32 key = get_unaligned((u32 *)(addr + 2));
427 return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
/* Hash chain to use for a given MAC address */
431 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
432 const u8 *mac, __be32 vni)
434 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
435 return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
437 return &vxlan->fdb_head[eth_hash(mac)];
440 /* Look up Ethernet address in forwarding table */
441 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
442 const u8 *mac, __be32 vni)
444 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
447 hlist_for_each_entry_rcu(f, head, hlist) {
448 if (ether_addr_equal(mac, f->eth_addr)) {
449 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
461 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
462 const u8 *mac, __be32 vni)
466 f = __vxlan_find_mac(vxlan, mac, vni);
473 /* caller should hold vxlan->hash_lock */
474 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
475 union vxlan_addr *ip, __be16 port,
476 __be32 vni, __u32 ifindex)
478 struct vxlan_rdst *rd;
480 list_for_each_entry(rd, &f->remotes, list) {
481 if (vxlan_addr_equal(&rd->remote_ip, ip) &&
482 rd->remote_port == port &&
483 rd->remote_vni == vni &&
484 rd->remote_ifindex == ifindex)
491 /* Replace destination of unicast mac */
492 static int vxlan_fdb_replace(struct vxlan_fdb *f,
493 union vxlan_addr *ip, __be16 port, __be32 vni,
496 struct vxlan_rdst *rd;
498 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
502 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
506 dst_cache_reset(&rd->dst_cache);
508 rd->remote_port = port;
509 rd->remote_vni = vni;
510 rd->remote_ifindex = ifindex;
514 /* Add/update destinations for multicast */
515 static int vxlan_fdb_append(struct vxlan_fdb *f,
516 union vxlan_addr *ip, __be16 port, __be32 vni,
517 __u32 ifindex, struct vxlan_rdst **rdp)
519 struct vxlan_rdst *rd;
521 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
525 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
529 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
535 rd->remote_port = port;
536 rd->remote_vni = vni;
537 rd->remote_ifindex = ifindex;
539 list_add_tail_rcu(&rd->list, &f->remotes);
545 static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
547 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0.  If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur."  However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;
564 *protocol = tun_p_to_eth_p(gpe->next_protocol);
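/* Illustrative sketch, not part of the driver: with GPE the inner payload
 * need not be an Ethernet frame, so vxlan_rcv() replaces the default
 * ETH_P_TEB inner protocol with whatever vxlan_parse_gpe_proto() reports,
 * e.g. ETH_P_IP or ETH_P_IPV6 for raw IP payloads.  The helper name is
 * hypothetical.
 */
static inline bool vxlan_example_gpe_is_raw_ip(__be16 protocol)
{
	return protocol == htons(ETH_P_IP) || protocol == htons(ETH_P_IPV6);
}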
571 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
573 struct vxlanhdr *vh, size_t hdrlen,
575 struct gro_remcsum *grc,
578 size_t start, offset;
580 if (skb->remcsum_offload)
583 if (!NAPI_GRO_CB(skb)->csum_valid)
586 start = vxlan_rco_start(vni_field);
587 offset = start + vxlan_rco_offset(vni_field);
589 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
590 start, offset, grc, nopartial);
592 skb->remcsum_offload = 1;
597 static struct sk_buff *vxlan_gro_receive(struct sock *sk,
598 struct list_head *head,
601 struct sk_buff *pp = NULL;
603 struct vxlanhdr *vh, *vh2;
604 unsigned int hlen, off_vx;
606 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
608 struct gro_remcsum grc;
610 skb_gro_remcsum_init(&grc);
612 off_vx = skb_gro_offset(skb);
613 hlen = off_vx + sizeof(*vh);
614 vh = skb_gro_header_fast(skb, off_vx);
615 if (skb_gro_header_hard(skb, hlen)) {
616 vh = skb_gro_header_slow(skb, hlen, off_vx);
621 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
623 flags = vh->vx_flags;
625 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
626 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
629 VXLAN_F_REMCSUM_NOPARTIAL));
635 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
637 list_for_each_entry(p, head, list) {
638 if (!NAPI_GRO_CB(p)->same_flow)
641 vh2 = (struct vxlanhdr *)(p->data + off_vx);
642 if (vh->vx_flags != vh2->vx_flags ||
643 vh->vx_vni != vh2->vx_vni) {
644 NAPI_GRO_CB(p)->same_flow = 0;
649 pp = call_gro_receive(eth_gro_receive, head, skb);
653 skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
658 static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
663 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
666 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
667 const u8 *mac, __u16 state,
668 __be32 src_vni, __u8 ndm_flags)
672 f = kmalloc(sizeof(*f), GFP_ATOMIC);
676 f->flags = ndm_flags;
677 f->updated = f->used = jiffies;
679 INIT_LIST_HEAD(&f->remotes);
680 memcpy(f->eth_addr, mac, ETH_ALEN);
685 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
686 const u8 *mac, union vxlan_addr *ip,
687 __u16 state, __be16 port, __be32 src_vni,
688 __be32 vni, __u32 ifindex, __u8 ndm_flags,
689 struct vxlan_fdb **fdb)
691 struct vxlan_rdst *rd = NULL;
695 if (vxlan->cfg.addrmax &&
696 vxlan->addrcnt >= vxlan->cfg.addrmax)
699 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
700 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
704 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
711 hlist_add_head_rcu(&f->hlist,
712 vxlan_fdb_head(vxlan, mac, src_vni));
719 /* Add new entry to forwarding table -- assumes lock held */
720 static int vxlan_fdb_update(struct vxlan_dev *vxlan,
721 const u8 *mac, union vxlan_addr *ip,
722 __u16 state, __u16 flags,
723 __be16 port, __be32 src_vni, __be32 vni,
724 __u32 ifindex, __u8 ndm_flags)
726 struct vxlan_rdst *rd = NULL;
731 f = __vxlan_find_mac(vxlan, mac, src_vni);
733 if (flags & NLM_F_EXCL) {
734 netdev_dbg(vxlan->dev,
735 "lost race to create %pM\n", mac);
738 if (f->state != state) {
740 f->updated = jiffies;
743 if (f->flags != ndm_flags) {
744 f->flags = ndm_flags;
745 f->updated = jiffies;
748 if ((flags & NLM_F_REPLACE)) {
749 /* Only change unicasts */
750 if (!(is_multicast_ether_addr(f->eth_addr) ||
751 is_zero_ether_addr(f->eth_addr))) {
752 notify |= vxlan_fdb_replace(f, ip, port, vni,
757 if ((flags & NLM_F_APPEND) &&
758 (is_multicast_ether_addr(f->eth_addr) ||
759 is_zero_ether_addr(f->eth_addr))) {
760 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
767 if (!(flags & NLM_F_CREATE))
770 /* Disallow replace to add a multicast entry */
771 if ((flags & NLM_F_REPLACE) &&
772 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
775 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
776 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
777 vni, ifindex, ndm_flags, &f);
785 rd = first_remote_rtnl(f);
786 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
792 static void vxlan_fdb_free(struct rcu_head *head)
794 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
795 struct vxlan_rdst *rd, *nd;
797 list_for_each_entry_safe(rd, nd, &f->remotes, list) {
798 dst_cache_destroy(&rd->dst_cache);
804 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
807 netdev_dbg(vxlan->dev,
808 "delete %pM\n", f->eth_addr);
812 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
814 hlist_del_rcu(&f->hlist);
815 call_rcu(&f->rcu, vxlan_fdb_free);
818 static void vxlan_dst_free(struct rcu_head *head)
820 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
822 dst_cache_destroy(&rd->dst_cache);
826 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
827 struct vxlan_rdst *rd)
829 list_del_rcu(&rd->list);
830 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
831 call_rcu(&rd->rcu, vxlan_dst_free);
834 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
835 union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
836 __be32 *vni, u32 *ifindex)
838 struct net *net = dev_net(vxlan->dev);
842 err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
846 union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
847 if (remote->sa.sa_family == AF_INET) {
848 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
849 ip->sa.sa_family = AF_INET;
850 #if IS_ENABLED(CONFIG_IPV6)
852 ip->sin6.sin6_addr = in6addr_any;
853 ip->sa.sa_family = AF_INET6;
859 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
861 *port = nla_get_be16(tb[NDA_PORT]);
863 *port = vxlan->cfg.dst_port;
867 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
869 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
871 *vni = vxlan->default_dst.remote_vni;
874 if (tb[NDA_SRC_VNI]) {
875 if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
877 *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
879 *src_vni = vxlan->default_dst.remote_vni;
882 if (tb[NDA_IFINDEX]) {
883 struct net_device *tdev;
885 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
887 *ifindex = nla_get_u32(tb[NDA_IFINDEX]);
888 tdev = __dev_get_by_index(net, *ifindex);
890 return -EADDRNOTAVAIL;
898 /* Add static entry (via netlink) */
899 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
900 struct net_device *dev,
901 const unsigned char *addr, u16 vid, u16 flags)
903 struct vxlan_dev *vxlan = netdev_priv(dev);
904 /* struct net *net = dev_net(vxlan->dev); */
911 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
912 pr_info("RTM_NEWNEIGH with invalid state %#x\n",
917 if (tb[NDA_DST] == NULL)
920 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
924 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
925 return -EAFNOSUPPORT;
927 spin_lock_bh(&vxlan->hash_lock);
928 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
929 port, src_vni, vni, ifindex, ndm->ndm_flags);
930 spin_unlock_bh(&vxlan->hash_lock);
935 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
936 const unsigned char *addr, union vxlan_addr ip,
937 __be16 port, __be32 src_vni, __be32 vni,
938 u32 ifindex, u16 vid)
941 struct vxlan_rdst *rd = NULL;
944 f = vxlan_find_mac(vxlan, addr, src_vni);
948 if (!vxlan_addr_any(&ip)) {
949 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
957 if (rd && !list_is_singular(&f->remotes)) {
958 vxlan_fdb_dst_destroy(vxlan, f, rd);
962 vxlan_fdb_destroy(vxlan, f, true);
968 /* Delete entry (via netlink) */
969 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
970 struct net_device *dev,
971 const unsigned char *addr, u16 vid)
973 struct vxlan_dev *vxlan = netdev_priv(dev);
980 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
984 spin_lock_bh(&vxlan->hash_lock);
985 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
987 spin_unlock_bh(&vxlan->hash_lock);
992 /* Dump forwarding table */
993 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
994 struct net_device *dev,
995 struct net_device *filter_dev, int *idx)
997 struct vxlan_dev *vxlan = netdev_priv(dev);
1001 for (h = 0; h < FDB_HASH_SIZE; ++h) {
1002 struct vxlan_fdb *f;
1005 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
1006 struct vxlan_rdst *rd;
1008 list_for_each_entry_rcu(rd, &f->remotes, list) {
1009 if (*idx < cb->args[2])
1012 err = vxlan_fdb_info(skb, vxlan, f,
1013 NETLINK_CB(cb->skb).portid,
/* Watch incoming packets to learn the mapping between Ethernet addresses
 * and tunnel endpoints.
 * Return true if the packet is bogus and should be dropped.
 */
1035 static bool vxlan_snoop(struct net_device *dev,
1036 union vxlan_addr *src_ip, const u8 *src_mac,
1037 u32 src_ifindex, __be32 vni)
1039 struct vxlan_dev *vxlan = netdev_priv(dev);
1040 struct vxlan_fdb *f;
1043 #if IS_ENABLED(CONFIG_IPV6)
1044 if (src_ip->sa.sa_family == AF_INET6 &&
1045 (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
1046 ifindex = src_ifindex;
1049 f = vxlan_find_mac(vxlan, src_mac, vni);
1051 struct vxlan_rdst *rdst = first_remote_rcu(f);
1053 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
1054 rdst->remote_ifindex == ifindex))
1057 /* Don't migrate static entries, drop packets */
1058 if (f->state & (NUD_PERMANENT | NUD_NOARP))
1061 if (net_ratelimit())
1063 "%pM migrated from %pIS to %pIS\n",
1064 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
1066 rdst->remote_ip = *src_ip;
1067 f->updated = jiffies;
1068 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
1070 /* learned new entry */
1071 spin_lock(&vxlan->hash_lock);
1073 /* close off race between vxlan_flush and incoming packets */
1074 if (netif_running(dev))
1075 vxlan_fdb_update(vxlan, src_mac, src_ip,
1077 NLM_F_EXCL|NLM_F_CREATE,
1078 vxlan->cfg.dst_port,
1080 vxlan->default_dst.remote_vni,
1082 spin_unlock(&vxlan->hash_lock);
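/* Illustrative sketch, hypothetical helper: the migration rule vxlan_snoop()
 * applies above.  Only dynamically learned entries may move to a new remote;
 * static entries (NUD_PERMANENT or NUD_NOARP) cause the packet to be
 * dropped instead.
 */
static inline bool vxlan_example_may_migrate(const struct vxlan_fdb *f)
{
	return !(f->state & (NUD_PERMANENT | NUD_NOARP));
}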
1088 /* See if multicast group is already in use by other ID */
1089 static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
1091 struct vxlan_dev *vxlan;
1092 struct vxlan_sock *sock4;
1093 #if IS_ENABLED(CONFIG_IPV6)
1094 struct vxlan_sock *sock6;
1096 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
1098 sock4 = rtnl_dereference(dev->vn4_sock);
1100 /* The vxlan_sock is only used by dev, leaving group has
1101 * no effect on other vxlan devices.
1103 if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
1105 #if IS_ENABLED(CONFIG_IPV6)
1106 sock6 = rtnl_dereference(dev->vn6_sock);
1107 if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
1111 list_for_each_entry(vxlan, &vn->vxlan_list, next) {
1112 if (!netif_running(vxlan->dev) || vxlan == dev)
1115 if (family == AF_INET &&
1116 rtnl_dereference(vxlan->vn4_sock) != sock4)
1118 #if IS_ENABLED(CONFIG_IPV6)
1119 if (family == AF_INET6 &&
1120 rtnl_dereference(vxlan->vn6_sock) != sock6)
1124 if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
1125 &dev->default_dst.remote_ip))
1128 if (vxlan->default_dst.remote_ifindex !=
1129 dev->default_dst.remote_ifindex)
1138 static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
1140 struct vxlan_net *vn;
1144 if (!refcount_dec_and_test(&vs->refcnt))
1147 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
1148 spin_lock(&vn->sock_lock);
1149 hlist_del_rcu(&vs->hlist);
1150 udp_tunnel_notify_del_rx_port(vs->sock,
1151 (vs->flags & VXLAN_F_GPE) ?
1152 UDP_TUNNEL_TYPE_VXLAN_GPE :
1153 UDP_TUNNEL_TYPE_VXLAN);
1154 spin_unlock(&vn->sock_lock);
1159 static void vxlan_sock_release(struct vxlan_dev *vxlan)
1161 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1162 #if IS_ENABLED(CONFIG_IPV6)
1163 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1165 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
1168 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
1171 vxlan_vs_del_dev(vxlan);
1173 if (__vxlan_sock_release_prep(sock4)) {
1174 udp_tunnel_sock_release(sock4->sock);
1178 #if IS_ENABLED(CONFIG_IPV6)
1179 if (__vxlan_sock_release_prep(sock6)) {
1180 udp_tunnel_sock_release(sock6->sock);
/* Update multicast group membership when the first VNI on
 * a multicast address is brought up.
 */
1189 static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1192 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1193 int ifindex = vxlan->default_dst.remote_ifindex;
1196 if (ip->sa.sa_family == AF_INET) {
1197 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1198 struct ip_mreqn mreq = {
1199 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1200 .imr_ifindex = ifindex,
1203 sk = sock4->sock->sk;
1205 ret = ip_mc_join_group(sk, &mreq);
1207 #if IS_ENABLED(CONFIG_IPV6)
1209 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1211 sk = sock6->sock->sk;
1213 ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
1214 &ip->sin6.sin6_addr);
1222 /* Inverse of vxlan_igmp_join when last VNI is brought down */
1223 static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
1226 union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
1227 int ifindex = vxlan->default_dst.remote_ifindex;
1230 if (ip->sa.sa_family == AF_INET) {
1231 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
1232 struct ip_mreqn mreq = {
1233 .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
1234 .imr_ifindex = ifindex,
1237 sk = sock4->sock->sk;
1239 ret = ip_mc_leave_group(sk, &mreq);
1241 #if IS_ENABLED(CONFIG_IPV6)
1243 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
1245 sk = sock6->sock->sk;
1247 ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
1248 &ip->sin6.sin6_addr);
1256 static bool vxlan_remcsum(struct vxlanhdr *unparsed,
1257 struct sk_buff *skb, u32 vxflags)
1259 size_t start, offset;
1261 if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
1264 start = vxlan_rco_start(unparsed->vx_vni);
1265 offset = start + vxlan_rco_offset(unparsed->vx_vni);
1267 if (!pskb_may_pull(skb, offset + sizeof(u16)))
1270 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
1271 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
1273 unparsed->vx_flags &= ~VXLAN_HF_RCO;
1274 unparsed->vx_vni &= VXLAN_VNI_MASK;
1278 static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
1279 struct sk_buff *skb, u32 vxflags,
1280 struct vxlan_metadata *md)
1282 struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
1283 struct metadata_dst *tun_dst;
1285 if (!(unparsed->vx_flags & VXLAN_HF_GBP))
1288 md->gbp = ntohs(gbp->policy_id);
1290 tun_dst = (struct metadata_dst *)skb_dst(skb);
1292 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1293 tun_dst->u.tun_info.options_len = sizeof(*md);
1295 if (gbp->dont_learn)
1296 md->gbp |= VXLAN_GBP_DONT_LEARN;
1298 if (gbp->policy_applied)
1299 md->gbp |= VXLAN_GBP_POLICY_APPLIED;
1301 /* In flow-based mode, GBP is carried in dst_metadata */
1302 if (!(vxflags & VXLAN_F_COLLECT_METADATA))
1303 skb->mark = md->gbp;
1305 unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
1308 static bool vxlan_set_mac(struct vxlan_dev *vxlan,
1309 struct vxlan_sock *vs,
1310 struct sk_buff *skb, __be32 vni)
1312 union vxlan_addr saddr;
1313 u32 ifindex = skb->dev->ifindex;
1315 skb_reset_mac_header(skb);
1316 skb->protocol = eth_type_trans(skb, vxlan->dev);
1317 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1319 /* Ignore packet loops (and multicast echo) */
1320 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1323 /* Ignore packets from invalid src-address */
1324 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
1327 /* Get address from the outer IP header */
1328 if (vxlan_get_sk_family(vs) == AF_INET) {
1329 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
1330 saddr.sa.sa_family = AF_INET;
1331 #if IS_ENABLED(CONFIG_IPV6)
1333 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
1334 saddr.sa.sa_family = AF_INET6;
1338 if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
1339 vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
1345 static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
1346 struct sk_buff *skb)
1350 if (vxlan_get_sk_family(vs) == AF_INET)
1351 err = IP_ECN_decapsulate(oiph, skb);
1352 #if IS_ENABLED(CONFIG_IPV6)
1354 err = IP6_ECN_decapsulate(oiph, skb);
1357 if (unlikely(err) && log_ecn_error) {
1358 if (vxlan_get_sk_family(vs) == AF_INET)
1359 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
1360 &((struct iphdr *)oiph)->saddr,
1361 ((struct iphdr *)oiph)->tos);
1363 net_info_ratelimited("non-ECT from %pI6\n",
1364 &((struct ipv6hdr *)oiph)->saddr);
1369 /* Callback from net/ipv4/udp.c to receive packets */
1370 static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1372 struct pcpu_sw_netstats *stats;
1373 struct vxlan_dev *vxlan;
1374 struct vxlan_sock *vs;
1375 struct vxlanhdr unparsed;
1376 struct vxlan_metadata _md;
1377 struct vxlan_metadata *md = &_md;
1378 __be16 protocol = htons(ETH_P_TEB);
1379 bool raw_proto = false;
1383 /* Need UDP and VXLAN header to be present */
1384 if (!pskb_may_pull(skb, VXLAN_HLEN))
1387 unparsed = *vxlan_hdr(skb);
1388 /* VNI flag always required to be set */
1389 if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
1390 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1391 ntohl(vxlan_hdr(skb)->vx_flags),
1392 ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non-VXLAN packet */
1396 unparsed.vx_flags &= ~VXLAN_HF_VNI;
1397 unparsed.vx_vni &= ~VXLAN_VNI_MASK;
1399 vs = rcu_dereference_sk_user_data(sk);
1403 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
1405 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
1412 if (vs->flags & VXLAN_F_GPE) {
1413 if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
1415 unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
1419 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
1420 !net_eq(vxlan->net, dev_net(vxlan->dev))))
1423 if (vxlan_collect_metadata(vs)) {
1424 struct metadata_dst *tun_dst;
1426 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
1427 key32_to_tunnel_id(vni), sizeof(*md));
1432 md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
1434 skb_dst_set(skb, (struct dst_entry *)tun_dst);
1436 memset(md, 0, sizeof(*md));
1439 if (vs->flags & VXLAN_F_REMCSUM_RX)
1440 if (!vxlan_remcsum(&unparsed, skb, vs->flags))
1442 if (vs->flags & VXLAN_F_GBP)
1443 vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together.  This is
	 * ensured in vxlan_dev_configure().
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet.  This behavior diverges from
		 * the VXLAN RFC (RFC 7348), which stipulates that bits in
		 * reserved fields are to be ignored.  The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto drop;
	}
1461 if (!vxlan_set_mac(vxlan, vs, skb, vni))
1464 skb_reset_mac_header(skb);
1465 skb->dev = vxlan->dev;
1466 skb->pkt_type = PACKET_HOST;
1469 oiph = skb_network_header(skb);
1470 skb_reset_network_header(skb);
1472 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
1473 ++vxlan->dev->stats.rx_frame_errors;
1474 ++vxlan->dev->stats.rx_errors;
1480 if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
1482 atomic_long_inc(&vxlan->dev->rx_dropped);
1486 stats = this_cpu_ptr(vxlan->dev->tstats);
1487 u64_stats_update_begin(&stats->syncp);
1488 stats->rx_packets++;
1489 stats->rx_bytes += skb->len;
1490 u64_stats_update_end(&stats->syncp);
1492 gro_cells_receive(&vxlan->gro_cells, skb);
1499 /* Consume bad packet */
1504 static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
1506 struct vxlan_dev *vxlan = netdev_priv(dev);
1507 struct arphdr *parp;
1510 struct neighbour *n;
1512 if (dev->flags & IFF_NOARP)
1515 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
1516 dev->stats.tx_dropped++;
1519 parp = arp_hdr(skb);
1521 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
1522 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
1523 parp->ar_pro != htons(ETH_P_IP) ||
1524 parp->ar_op != htons(ARPOP_REQUEST) ||
1525 parp->ar_hln != dev->addr_len ||
1528 arpptr = (u8 *)parp + sizeof(struct arphdr);
1530 arpptr += dev->addr_len; /* sha */
1531 memcpy(&sip, arpptr, sizeof(sip));
1532 arpptr += sizeof(sip);
1533 arpptr += dev->addr_len; /* tha */
1534 memcpy(&tip, arpptr, sizeof(tip));
1536 if (ipv4_is_loopback(tip) ||
1537 ipv4_is_multicast(tip))
1540 n = neigh_lookup(&arp_tbl, &tip, dev);
1543 struct vxlan_fdb *f;
1544 struct sk_buff *reply;
1546 if (!(n->nud_state & NUD_CONNECTED)) {
1551 f = vxlan_find_mac(vxlan, n->ha, vni);
1552 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1553 /* bridge-local neighbor */
1558 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1566 skb_reset_mac_header(reply);
1567 __skb_pull(reply, skb_network_offset(reply));
1568 reply->ip_summed = CHECKSUM_UNNECESSARY;
1569 reply->pkt_type = PACKET_HOST;
1571 if (netif_rx_ni(reply) == NET_RX_DROP)
1572 dev->stats.rx_dropped++;
1573 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
1574 union vxlan_addr ipa = {
1575 .sin.sin_addr.s_addr = tip,
1576 .sin.sin_family = AF_INET,
1579 vxlan_ip_miss(dev, &ipa);
1583 return NETDEV_TX_OK;
1586 #if IS_ENABLED(CONFIG_IPV6)
1587 static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1588 struct neighbour *n, bool isrouter)
1590 struct net_device *dev = request->dev;
1591 struct sk_buff *reply;
1592 struct nd_msg *ns, *na;
1593 struct ipv6hdr *pip6;
1595 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1599 if (dev == NULL || !pskb_may_pull(request, request->len))
1602 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1603 sizeof(*na) + na_olen + dev->needed_tailroom;
1604 reply = alloc_skb(len, GFP_ATOMIC);
1608 reply->protocol = htons(ETH_P_IPV6);
1610 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1611 skb_push(reply, sizeof(struct ethhdr));
1612 skb_reset_mac_header(reply);
1614 ns = (struct nd_msg *)(ipv6_hdr(request) + 1);
1616 daddr = eth_hdr(request)->h_source;
1617 ns_olen = request->len - skb_network_offset(request) -
1618 sizeof(struct ipv6hdr) - sizeof(*ns);
1619 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1620 if (!ns->opt[i + 1]) {
1624 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1625 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1630 /* Ethernet header */
1631 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1632 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1633 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1634 reply->protocol = htons(ETH_P_IPV6);
1636 skb_pull(reply, sizeof(struct ethhdr));
1637 skb_reset_network_header(reply);
1638 skb_put(reply, sizeof(struct ipv6hdr));
1642 pip6 = ipv6_hdr(reply);
1643 memset(pip6, 0, sizeof(struct ipv6hdr));
1645 pip6->priority = ipv6_hdr(request)->priority;
1646 pip6->nexthdr = IPPROTO_ICMPV6;
1647 pip6->hop_limit = 255;
1648 pip6->daddr = ipv6_hdr(request)->saddr;
1649 pip6->saddr = *(struct in6_addr *)n->primary_key;
1651 skb_pull(reply, sizeof(struct ipv6hdr));
1652 skb_reset_transport_header(reply);
1654 /* Neighbor Advertisement */
1655 na = skb_put_zero(reply, sizeof(*na) + na_olen);
1656 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1657 na->icmph.icmp6_router = isrouter;
1658 na->icmph.icmp6_override = 1;
1659 na->icmph.icmp6_solicited = 1;
1660 na->target = ns->target;
1661 ether_addr_copy(&na->opt[2], n->ha);
1662 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1663 na->opt[1] = na_olen >> 3;
1665 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1666 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1667 csum_partial(na, sizeof(*na)+na_olen, 0));
1669 pip6->payload_len = htons(sizeof(*na)+na_olen);
1671 skb_push(reply, sizeof(struct ipv6hdr));
1673 reply->ip_summed = CHECKSUM_UNNECESSARY;
1678 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
1680 struct vxlan_dev *vxlan = netdev_priv(dev);
1681 const struct in6_addr *daddr;
1682 const struct ipv6hdr *iphdr;
1683 struct inet6_dev *in6_dev;
1684 struct neighbour *n;
1688 in6_dev = __in6_dev_get(dev);
1692 iphdr = ipv6_hdr(skb);
1693 daddr = &iphdr->daddr;
1694 msg = (struct nd_msg *)(iphdr + 1);
1696 if (ipv6_addr_loopback(daddr) ||
1697 ipv6_addr_is_multicast(&msg->target))
1700 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1703 struct vxlan_fdb *f;
1704 struct sk_buff *reply;
1706 if (!(n->nud_state & NUD_CONNECTED)) {
1711 f = vxlan_find_mac(vxlan, n->ha, vni);
1712 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
1713 /* bridge-local neighbor */
1718 reply = vxlan_na_create(skb, n,
1719 !!(f ? f->flags & NTF_ROUTER : 0));
1726 if (netif_rx_ni(reply) == NET_RX_DROP)
1727 dev->stats.rx_dropped++;
1729 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
1730 union vxlan_addr ipa = {
1731 .sin6.sin6_addr = msg->target,
1732 .sin6.sin6_family = AF_INET6,
1735 vxlan_ip_miss(dev, &ipa);
1741 return NETDEV_TX_OK;
1745 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1747 struct vxlan_dev *vxlan = netdev_priv(dev);
1748 struct neighbour *n;
1750 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
1754 switch (ntohs(eth_hdr(skb)->h_proto)) {
1759 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1762 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
1763 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
1764 union vxlan_addr ipa = {
1765 .sin.sin_addr.s_addr = pip->daddr,
1766 .sin.sin_family = AF_INET,
1769 vxlan_ip_miss(dev, &ipa);
1775 #if IS_ENABLED(CONFIG_IPV6)
1778 struct ipv6hdr *pip6;
1780 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
1782 pip6 = ipv6_hdr(skb);
1783 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
1784 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
1785 union vxlan_addr ipa = {
1786 .sin6.sin6_addr = pip6->daddr,
1787 .sin6.sin6_family = AF_INET6,
1790 vxlan_ip_miss(dev, &ipa);
1804 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
1806 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
1808 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
1817 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1818 struct vxlan_metadata *md)
1820 struct vxlanhdr_gbp *gbp;
1825 gbp = (struct vxlanhdr_gbp *)vxh;
1826 vxh->vx_flags |= VXLAN_HF_GBP;
1828 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1829 gbp->dont_learn = 1;
1831 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1832 gbp->policy_applied = 1;
1834 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1837 static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
1840 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
1842 gpe->np_applied = 1;
1843 gpe->next_protocol = tun_p_from_eth_p(protocol);
1844 if (!gpe->next_protocol)
1845 return -EPFNOSUPPORT;
1849 static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1850 int iphdr_len, __be32 vni,
1851 struct vxlan_metadata *md, u32 vxflags,
1854 struct vxlanhdr *vxh;
1857 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1858 __be16 inner_protocol = htons(ETH_P_TEB);
1860 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1861 skb->ip_summed == CHECKSUM_PARTIAL) {
1862 int csum_start = skb_checksum_start_offset(skb);
1864 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1865 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1866 (skb->csum_offset == offsetof(struct udphdr, check) ||
1867 skb->csum_offset == offsetof(struct tcphdr, check)))
1868 type |= SKB_GSO_TUNNEL_REMCSUM;
1871 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1872 + VXLAN_HLEN + iphdr_len;
1874 /* Need space for new headers (invalidates iph ptr) */
1875 err = skb_cow_head(skb, min_headroom);
1879 err = iptunnel_handle_offloads(skb, type);
1883 vxh = __skb_push(skb, sizeof(*vxh));
1884 vxh->vx_flags = VXLAN_HF_VNI;
1885 vxh->vx_vni = vxlan_vni_field(vni);
1887 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1890 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
1891 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
1892 vxh->vx_flags |= VXLAN_HF_RCO;
1894 if (!skb_is_gso(skb)) {
1895 skb->ip_summed = CHECKSUM_NONE;
1896 skb->encapsulation = 0;
1900 if (vxflags & VXLAN_F_GBP)
1901 vxlan_build_gbp_hdr(vxh, vxflags, md);
1902 if (vxflags & VXLAN_F_GPE) {
1903 err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
1906 inner_protocol = skb->protocol;
1909 skb_set_inner_protocol(skb, inner_protocol);
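/* Illustrative sketch, hypothetical helper: the headroom vxlan_build_skb()
 * reserves above before pushing the VXLAN header -- the link-layer reserve
 * plus any lower-layer header, plus the UDP/VXLAN encapsulation, plus the
 * outer IP header.
 */
static inline int vxlan_example_min_headroom(const struct dst_entry *dst,
					     int iphdr_len)
{
	return LL_RESERVED_SPACE(dst->dev) + dst->header_len +
	       VXLAN_HLEN + iphdr_len;
}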
1913 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
1914 struct vxlan_sock *sock4,
1915 struct sk_buff *skb, int oif, u8 tos,
1916 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
1917 struct dst_cache *dst_cache,
1918 const struct ip_tunnel_info *info)
1920 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1921 struct rtable *rt = NULL;
1925 return ERR_PTR(-EIO);
1930 rt = dst_cache_get_ip4(dst_cache, saddr);
1935 memset(&fl4, 0, sizeof(fl4));
1936 fl4.flowi4_oif = oif;
1937 fl4.flowi4_tos = RT_TOS(tos);
1938 fl4.flowi4_mark = skb->mark;
1939 fl4.flowi4_proto = IPPROTO_UDP;
1942 fl4.fl4_dport = dport;
1943 fl4.fl4_sport = sport;
1945 rt = ip_route_output_key(vxlan->net, &fl4);
1946 if (likely(!IS_ERR(rt))) {
1947 if (rt->dst.dev == dev) {
1948 netdev_dbg(dev, "circular route to %pI4\n", &daddr);
1950 return ERR_PTR(-ELOOP);
1955 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
1957 netdev_dbg(dev, "no route to %pI4\n", &daddr);
1958 return ERR_PTR(-ENETUNREACH);
1963 #if IS_ENABLED(CONFIG_IPV6)
1964 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1965 struct net_device *dev,
1966 struct vxlan_sock *sock6,
1967 struct sk_buff *skb, int oif, u8 tos,
1969 const struct in6_addr *daddr,
1970 struct in6_addr *saddr,
1971 __be16 dport, __be16 sport,
1972 struct dst_cache *dst_cache,
1973 const struct ip_tunnel_info *info)
1975 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
1976 struct dst_entry *ndst;
1980 return ERR_PTR(-EIO);
1985 ndst = dst_cache_get_ip6(dst_cache, saddr);
1990 memset(&fl6, 0, sizeof(fl6));
1991 fl6.flowi6_oif = oif;
1994 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1995 fl6.flowi6_mark = skb->mark;
1996 fl6.flowi6_proto = IPPROTO_UDP;
1997 fl6.fl6_dport = dport;
1998 fl6.fl6_sport = sport;
2000 ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
2002 if (unlikely(IS_ERR(ndst))) {
2003 netdev_dbg(dev, "no route to %pI6\n", daddr);
2004 return ERR_PTR(-ENETUNREACH);
2007 if (unlikely(ndst->dev == dev)) {
2008 netdev_dbg(dev, "circular route to %pI6\n", daddr);
2010 return ERR_PTR(-ELOOP);
2015 dst_cache_set_ip6(dst_cache, ndst, saddr);
2020 /* Bypass encapsulation if the destination is local */
2021 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2022 struct vxlan_dev *dst_vxlan, __be32 vni)
2024 struct pcpu_sw_netstats *tx_stats, *rx_stats;
2025 union vxlan_addr loopback;
2026 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
2027 struct net_device *dev;
2030 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
2031 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
2032 skb->pkt_type = PACKET_HOST;
2033 skb->encapsulation = 0;
2034 skb->dev = dst_vxlan->dev;
2035 __skb_pull(skb, skb_network_offset(skb));
2037 if (remote_ip->sa.sa_family == AF_INET) {
2038 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2039 loopback.sa.sa_family = AF_INET;
2040 #if IS_ENABLED(CONFIG_IPV6)
2042 loopback.sin6.sin6_addr = in6addr_loopback;
2043 loopback.sa.sa_family = AF_INET6;
2049 if (unlikely(!(dev->flags & IFF_UP))) {
2054 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
2055 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
2057 u64_stats_update_begin(&tx_stats->syncp);
2058 tx_stats->tx_packets++;
2059 tx_stats->tx_bytes += len;
2060 u64_stats_update_end(&tx_stats->syncp);
2062 if (netif_rx(skb) == NET_RX_SUCCESS) {
2063 u64_stats_update_begin(&rx_stats->syncp);
2064 rx_stats->rx_packets++;
2065 rx_stats->rx_bytes += len;
2066 u64_stats_update_end(&rx_stats->syncp);
2069 dev->stats.rx_dropped++;
2074 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
2075 struct vxlan_dev *vxlan,
2076 union vxlan_addr *daddr,
2077 __be16 dst_port, int dst_ifindex, __be32 vni,
2078 struct dst_entry *dst,
2081 #if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL.  So to keep the code simple
	 * we can use RTCF_LOCAL, which works for both IPv4 and IPv6 route
	 * entries.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
2088 /* Bypass encapsulation if the destination is local */
2089 if (rt_flags & RTCF_LOCAL &&
2090 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2091 struct vxlan_dev *dst_vxlan;
2094 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
2095 daddr->sa.sa_family, dst_port,
2098 dev->stats.tx_errors++;
2103 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
2110 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2111 __be32 default_vni, struct vxlan_rdst *rdst,
2114 struct dst_cache *dst_cache;
2115 struct ip_tunnel_info *info;
2116 struct vxlan_dev *vxlan = netdev_priv(dev);
2117 const struct iphdr *old_iph = ip_hdr(skb);
2118 union vxlan_addr *dst;
2119 union vxlan_addr remote_ip, local_ip;
2120 struct vxlan_metadata _md;
2121 struct vxlan_metadata *md = &_md;
2122 __be16 src_port = 0, dst_port;
2123 struct dst_entry *ndst = NULL;
2128 u32 flags = vxlan->cfg.flags;
2129 bool udp_sum = false;
2130 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
2132 info = skb_tunnel_info(skb);
2135 dst = &rdst->remote_ip;
2136 if (vxlan_addr_any(dst)) {
2138 /* short-circuited back to local bridge */
2139 vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
2145 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
2146 vni = (rdst->remote_vni) ? : default_vni;
2147 ifindex = rdst->remote_ifindex;
2148 local_ip = vxlan->cfg.saddr;
2149 dst_cache = &rdst->dst_cache;
2150 md->gbp = skb->mark;
2151 if (flags & VXLAN_F_TTL_INHERIT) {
2152 ttl = ip_tunnel_get_ttl(old_iph, skb);
2154 ttl = vxlan->cfg.ttl;
2155 if (!ttl && vxlan_addr_multicast(dst))
2159 tos = vxlan->cfg.tos;
2161 tos = ip_tunnel_get_dsfield(old_iph, skb);
2163 if (dst->sa.sa_family == AF_INET)
2164 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
2166 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2167 label = vxlan->cfg.label;
2170 WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
2174 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
2175 if (remote_ip.sa.sa_family == AF_INET) {
2176 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
2177 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
2179 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
2180 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
2183 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
2184 vni = tunnel_id_to_key32(info->key.tun_id);
2186 dst_cache = &info->dst_cache;
2187 if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
2188 if (info->options_len < sizeof(*md))
2190 md = ip_tunnel_info_opts(info);
2192 ttl = info->key.ttl;
2193 tos = info->key.tos;
2194 label = info->key.label;
2195 udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
2197 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2198 vxlan->cfg.port_max, true);
2201 if (dst->sa.sa_family == AF_INET) {
2202 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2206 rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
2207 dst->sin.sin_addr.s_addr,
2208 &local_ip.sin.sin_addr.s_addr,
2216 /* Bypass encapsulation if the destination is local */
2218 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2219 dst_port, ifindex, vni,
2220 &rt->dst, rt->rt_flags);
2223 } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
2228 skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
2230 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2231 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
2232 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
2233 vni, md, flags, udp_sum);
2237 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
2238 dst->sin.sin_addr.s_addr, tos, ttl, df,
2239 src_port, dst_port, xnet, !udp_sum);
2240 #if IS_ENABLED(CONFIG_IPV6)
2242 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2244 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
2245 label, &dst->sin6.sin6_addr,
2246 &local_ip.sin6.sin6_addr,
2250 err = PTR_ERR(ndst);
2256 u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2258 err = encap_bypass_if_local(skb, dev, vxlan, dst,
2259 dst_port, ifindex, vni,
2265 skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
2267 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
2268 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2269 skb_scrub_packet(skb, xnet);
2270 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
2271 vni, md, flags, udp_sum);
2275 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
2276 &local_ip.sin6.sin6_addr,
2277 &dst->sin6.sin6_addr, tos, ttl,
2278 label, src_port, dst_port, !udp_sum);
2286 dev->stats.tx_dropped++;
2293 dev->stats.collisions++;
2294 else if (err == -ENETUNREACH)
2295 dev->stats.tx_carrier_errors++;
2297 dev->stats.tx_errors++;
/* Transmit local packets over VXLAN.
 *
 * The outer IP header inherits ECN and DF from the inner header.
 * The outer UDP destination is the VXLAN assigned port.
 * The source port is based on a hash of the flow.
 */
2307 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2309 struct vxlan_dev *vxlan = netdev_priv(dev);
2310 struct vxlan_rdst *rdst, *fdst = NULL;
2311 const struct ip_tunnel_info *info;
2312 bool did_rsc = false;
2313 struct vxlan_fdb *f;
2317 info = skb_tunnel_info(skb);
2319 skb_reset_mac_header(skb);
2321 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
2322 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
2323 info->mode & IP_TUNNEL_INFO_TX) {
2324 vni = tunnel_id_to_key32(info->key.tun_id);
2326 if (info && info->mode & IP_TUNNEL_INFO_TX)
2327 vxlan_xmit_one(skb, dev, vni, NULL, false);
2330 return NETDEV_TX_OK;
2334 if (vxlan->cfg.flags & VXLAN_F_PROXY) {
2336 if (ntohs(eth->h_proto) == ETH_P_ARP)
2337 return arp_reduce(dev, skb, vni);
2338 #if IS_ENABLED(CONFIG_IPV6)
2339 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
2340 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
2341 sizeof(struct nd_msg)) &&
2342 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
2343 struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
2345 if (m->icmph.icmp6_code == 0 &&
2346 m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
2347 return neigh_reduce(dev, skb, vni);
2353 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2356 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
2357 (ntohs(eth->h_proto) == ETH_P_IP ||
2358 ntohs(eth->h_proto) == ETH_P_IPV6)) {
2359 did_rsc = route_shortcircuit(dev, skb);
2361 f = vxlan_find_mac(vxlan, eth->h_dest, vni);
2365 f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
2367 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
2368 !is_multicast_ether_addr(eth->h_dest))
2369 vxlan_fdb_miss(vxlan, eth->h_dest);
2371 dev->stats.tx_dropped++;
2373 return NETDEV_TX_OK;
2377 list_for_each_entry_rcu(rdst, &f->remotes, list) {
2378 struct sk_buff *skb1;
2384 skb1 = skb_clone(skb, GFP_ATOMIC);
2386 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
2390 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
2393 return NETDEV_TX_OK;
2396 /* Walk the forwarding table and purge stale entries */
2397 static void vxlan_cleanup(struct timer_list *t)
2399 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
2400 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
2403 if (!netif_running(vxlan->dev))
2406 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2407 struct hlist_node *p, *n;
2409 spin_lock_bh(&vxlan->hash_lock);
2410 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2412 = container_of(p, struct vxlan_fdb, hlist);
2413 unsigned long timeout;
2415 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2418 if (f->flags & NTF_EXT_LEARNED)
2421 timeout = f->used + vxlan->cfg.age_interval * HZ;
2422 if (time_before_eq(timeout, jiffies)) {
2423 netdev_dbg(vxlan->dev,
2424 "garbage collect %pM\n",
2426 f->state = NUD_STALE;
2427 vxlan_fdb_destroy(vxlan, f, true);
2428 } else if (time_before(timeout, next_timer))
2429 next_timer = timeout;
2431 spin_unlock_bh(&vxlan->hash_lock);
2434 mod_timer(&vxlan->age_timer, next_timer);
2437 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2439 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2441 spin_lock(&vn->sock_lock);
2442 hlist_del_init_rcu(&vxlan->hlist4.hlist);
2443 #if IS_ENABLED(CONFIG_IPV6)
2444 hlist_del_init_rcu(&vxlan->hlist6.hlist);
2446 spin_unlock(&vn->sock_lock);
2449 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2450 struct vxlan_dev_node *node)
2452 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2453 __be32 vni = vxlan->default_dst.remote_vni;
2455 node->vxlan = vxlan;
2456 spin_lock(&vn->sock_lock);
2457 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
2458 spin_unlock(&vn->sock_lock);
2461 /* Setup stats when device is created */
2462 static int vxlan_init(struct net_device *dev)
2464 struct vxlan_dev *vxlan = netdev_priv(dev);
2467 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2471 err = gro_cells_init(&vxlan->gro_cells, dev);
2473 free_percpu(dev->tstats);
2480 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2482 struct vxlan_fdb *f;
2484 spin_lock_bh(&vxlan->hash_lock);
2485 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2487 vxlan_fdb_destroy(vxlan, f, true);
2488 spin_unlock_bh(&vxlan->hash_lock);
2491 static void vxlan_uninit(struct net_device *dev)
2493 struct vxlan_dev *vxlan = netdev_priv(dev);
2495 gro_cells_destroy(&vxlan->gro_cells);
2497 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
2499 free_percpu(dev->tstats);
2502 /* Start ageing timer and join group when device is brought up */
2503 static int vxlan_open(struct net_device *dev)
2505 struct vxlan_dev *vxlan = netdev_priv(dev);
2508 ret = vxlan_sock_add(vxlan);
2512 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2513 ret = vxlan_igmp_join(vxlan);
2514 if (ret == -EADDRINUSE)
2517 vxlan_sock_release(vxlan);
2522 if (vxlan->cfg.age_interval)
2523 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2528 /* Purge the forwarding table */
2529 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2533 spin_lock_bh(&vxlan->hash_lock);
2534 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2535 struct hlist_node *p, *n;
2536 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2538 = container_of(p, struct vxlan_fdb, hlist);
2539 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2541 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2542 if (!is_zero_ether_addr(f->eth_addr))
2543 vxlan_fdb_destroy(vxlan, f, true);
2546 spin_unlock_bh(&vxlan->hash_lock);
2549 /* Cleanup timer and forwarding table on shutdown */
2550 static int vxlan_stop(struct net_device *dev)
2552 struct vxlan_dev *vxlan = netdev_priv(dev);
2553 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2555 int ret = 0;
2556 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2557 !vxlan_group_used(vn, vxlan))
2558 ret = vxlan_igmp_leave(vxlan);
2560 del_timer_sync(&vxlan->age_timer);
2562 vxlan_flush(vxlan, false);
2563 vxlan_sock_release(vxlan);
2565 return ret;
2566 }
2568 /* Stub, nothing needs to be done. */
2569 static void vxlan_set_multicast_list(struct net_device *dev)
2573 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2575 struct vxlan_dev *vxlan = netdev_priv(dev);
2576 struct vxlan_rdst *dst = &vxlan->default_dst;
2577 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2578 dst->remote_ifindex);
2579 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
2581 /* This check is different than dev->max_mtu, because it looks at
2582 * the lowerdev->mtu, rather than the static dev->max_mtu
2583 */
2584 if (lowerdev) {
2585 int max_mtu = lowerdev->mtu -
2586 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2587 if (new_mtu > max_mtu)
2588 return -EINVAL;
2589 }
2591 dev->mtu = new_mtu;
2592 return 0;
2593 }
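/* Worked example (sketch, not part of the driver): the same headroom
 * arithmetic as above. With a 1500-byte IPv4 lower device, the outer
 * Ethernet/IPv4/UDP/VXLAN headers (VXLAN_HEADROOM) leave 1450 bytes for the
 * inner frame; over IPv6 (VXLAN6_HEADROOM) it is 1430. The helper name is
 * hypothetical.
 */
static inline int vxlan_example_max_mtu(int lowerdev_mtu, bool use_ipv6)
{
	return lowerdev_mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
}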
2595 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2597 struct vxlan_dev *vxlan = netdev_priv(dev);
2598 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2599 __be16 sport, dport;
2601 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2602 vxlan->cfg.port_max, true);
2603 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2605 if (ip_tunnel_info_af(info) == AF_INET) {
2606 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2607 struct rtable *rt;
2609 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2610 info->key.u.ipv4.dst,
2611 &info->key.u.ipv4.src, dport, sport,
2612 &info->dst_cache, info);
2613 if (IS_ERR(rt))
2614 return PTR_ERR(rt);
2615 ip_rt_put(rt);
2616 } else {
2617 #if IS_ENABLED(CONFIG_IPV6)
2618 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2619 struct dst_entry *ndst;
2621 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2622 info->key.label, &info->key.u.ipv6.dst,
2623 &info->key.u.ipv6.src, dport, sport,
2624 &info->dst_cache, info);
2625 if (IS_ERR(ndst))
2626 return PTR_ERR(ndst);
2627 dst_release(ndst);
2628 #else /* !CONFIG_IPV6 */
2629 return -EPFNOSUPPORT;
2630 #endif
2631 }
2632 info->key.tp_src = sport;
2633 info->key.tp_dst = dport;
2634 return 0;
2635 }
2637 static const struct net_device_ops vxlan_netdev_ether_ops = {
2638 .ndo_init = vxlan_init,
2639 .ndo_uninit = vxlan_uninit,
2640 .ndo_open = vxlan_open,
2641 .ndo_stop = vxlan_stop,
2642 .ndo_start_xmit = vxlan_xmit,
2643 .ndo_get_stats64 = ip_tunnel_get_stats64,
2644 .ndo_set_rx_mode = vxlan_set_multicast_list,
2645 .ndo_change_mtu = vxlan_change_mtu,
2646 .ndo_validate_addr = eth_validate_addr,
2647 .ndo_set_mac_address = eth_mac_addr,
2648 .ndo_fdb_add = vxlan_fdb_add,
2649 .ndo_fdb_del = vxlan_fdb_delete,
2650 .ndo_fdb_dump = vxlan_fdb_dump,
2651 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2654 static const struct net_device_ops vxlan_netdev_raw_ops = {
2655 .ndo_init = vxlan_init,
2656 .ndo_uninit = vxlan_uninit,
2657 .ndo_open = vxlan_open,
2658 .ndo_stop = vxlan_stop,
2659 .ndo_start_xmit = vxlan_xmit,
2660 .ndo_get_stats64 = ip_tunnel_get_stats64,
2661 .ndo_change_mtu = vxlan_change_mtu,
2662 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2665 /* Info for udev, that this is a virtual tunnel endpoint */
2666 static struct device_type vxlan_type = {
2667 .name = "vxlan",
2668 };
2670 /* Calls the ndo_udp_tunnel_add of the caller in order to
2671 * supply the listening VXLAN udp ports. Callers are expected
2672 * to implement the ndo_udp_tunnel_add.
2674 static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
2676 struct vxlan_sock *vs;
2677 struct net *net = dev_net(dev);
2678 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2679 unsigned int i;
2681 spin_lock(&vn->sock_lock);
2682 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2683 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2684 unsigned short type;
2686 if (vs->flags & VXLAN_F_GPE)
2687 type = UDP_TUNNEL_TYPE_VXLAN_GPE;
2688 else
2689 type = UDP_TUNNEL_TYPE_VXLAN;
2691 if (push)
2692 udp_tunnel_push_rx_port(dev, vs->sock, type);
2693 else
2694 udp_tunnel_drop_rx_port(dev, vs->sock, type);
2697 spin_unlock(&vn->sock_lock);
2698 }
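/* Illustrative sketch (hypothetical, not part of this driver): the receiving
 * side of the push above. udp_tunnel_push_rx_port() ends up invoking the
 * lower device's ndo_udp_tunnel_add with a struct udp_tunnel_info describing
 * the listening socket; a NIC driver might handle it roughly like this.
 */
static void __maybe_unused example_nic_udp_tunnel_add(struct net_device *dev,
						      struct udp_tunnel_info *ti)
{
	/* this hypothetical NIC only offloads plain VXLAN, not VXLAN-GPE */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	/* program ntohs(ti->port) as a VXLAN UDP port into the hardware */
}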
2700 /* Initialize the device structure. */
2701 static void vxlan_setup(struct net_device *dev)
2703 struct vxlan_dev *vxlan = netdev_priv(dev);
2704 unsigned int h;
2706 eth_hw_addr_random(dev);
2707 ether_setup(dev);
2709 dev->needs_free_netdev = true;
2710 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2712 dev->features |= NETIF_F_LLTX;
2713 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2714 dev->features |= NETIF_F_RXCSUM;
2715 dev->features |= NETIF_F_GSO_SOFTWARE;
2717 dev->vlan_features = dev->features;
2718 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2719 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2720 netif_keep_dst(dev);
2721 dev->priv_flags |= IFF_NO_QUEUE;
2723 /* MTU range: 68 - 65535 */
2724 dev->min_mtu = ETH_MIN_MTU;
2725 dev->max_mtu = ETH_MAX_MTU;
2727 INIT_LIST_HEAD(&vxlan->next);
2728 spin_lock_init(&vxlan->hash_lock);
2730 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
2734 for (h = 0; h < FDB_HASH_SIZE; ++h)
2735 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2738 static void vxlan_ether_setup(struct net_device *dev)
2740 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2741 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2742 dev->netdev_ops = &vxlan_netdev_ether_ops;
2745 static void vxlan_raw_setup(struct net_device *dev)
2747 dev->header_ops = NULL;
2748 dev->type = ARPHRD_NONE;
2749 dev->hard_header_len = 0;
2751 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2752 dev->netdev_ops = &vxlan_netdev_raw_ops;
2755 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2756 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2757 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2758 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2759 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2760 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2761 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2762 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2763 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2764 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
2765 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2766 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2767 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2768 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2769 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2770 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2771 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2772 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2773 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2774 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2775 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2776 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2777 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2778 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2779 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2780 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2781 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2782 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2783 [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
2786 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
2787 struct netlink_ext_ack *extack)
2789 if (tb[IFLA_ADDRESS]) {
2790 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2791 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2792 "Provided link layer address is not Ethernet");
2796 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2797 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2798 "Provided Ethernet address is not unicast");
2799 return -EADDRNOTAVAIL;
2800 }
2801 }
2803 if (tb[IFLA_MTU]) {
2804 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2806 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
2807 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
2808 "MTU must be between 68 and 65535");
2814 NL_SET_ERR_MSG(extack,
2815 "Required attributes not provided to perform the operation");
2819 if (data[IFLA_VXLAN_ID]) {
2820 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2822 if (id >= VXLAN_N_VID) {
2823 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
2824 "VXLAN ID must be lower than 16777216");
2829 if (data[IFLA_VXLAN_PORT_RANGE]) {
2830 const struct ifla_vxlan_port_range *p
2831 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2833 if (ntohs(p->high) < ntohs(p->low)) {
2834 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
2835 "Invalid source port range");
2843 static void vxlan_get_drvinfo(struct net_device *netdev,
2844 struct ethtool_drvinfo *drvinfo)
2846 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2847 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2850 static const struct ethtool_ops vxlan_ethtool_ops = {
2851 .get_drvinfo = vxlan_get_drvinfo,
2852 .get_link = ethtool_op_get_link,
2855 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2856 __be16 port, u32 flags)
2858 struct socket *sock;
2859 struct udp_port_cfg udp_conf;
2860 int err;
2862 memset(&udp_conf, 0, sizeof(udp_conf));
2864 if (ipv6) {
2865 udp_conf.family = AF_INET6;
2866 udp_conf.use_udp6_rx_checksums =
2867 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2868 udp_conf.ipv6_v6only = 1;
2869 } else {
2870 udp_conf.family = AF_INET;
2871 }
2873 udp_conf.local_udp_port = port;
2875 /* Open UDP socket */
2876 err = udp_sock_create(net, &udp_conf, &sock);
2877 if (err < 0)
2878 return ERR_PTR(err);
2880 return sock;
2881 }
2883 /* Create new listen socket if needed */
2884 static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
2885 __be16 port, u32 flags)
2887 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2888 struct vxlan_sock *vs;
2889 struct socket *sock;
2890 unsigned int h;
2891 struct udp_tunnel_sock_cfg tunnel_cfg;
2893 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2894 if (!vs)
2895 return ERR_PTR(-ENOMEM);
2897 for (h = 0; h < VNI_HASH_SIZE; ++h)
2898 INIT_HLIST_HEAD(&vs->vni_list[h]);
2900 sock = vxlan_create_sock(net, ipv6, port, flags);
2901 if (IS_ERR(sock)) {
2902 kfree(vs);
2903 return ERR_CAST(sock);
2904 }
2906 vs->sock = sock;
2907 refcount_set(&vs->refcnt, 1);
2908 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2910 spin_lock(&vn->sock_lock);
2911 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
2912 udp_tunnel_notify_add_rx_port(sock,
2913 (vs->flags & VXLAN_F_GPE) ?
2914 UDP_TUNNEL_TYPE_VXLAN_GPE :
2915 UDP_TUNNEL_TYPE_VXLAN);
2916 spin_unlock(&vn->sock_lock);
2918 /* Mark socket as an encapsulation socket. */
2919 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
2920 tunnel_cfg.sk_user_data = vs;
2921 tunnel_cfg.encap_type = 1;
2922 tunnel_cfg.encap_rcv = vxlan_rcv;
2923 tunnel_cfg.encap_destroy = NULL;
2924 tunnel_cfg.gro_receive = vxlan_gro_receive;
2925 tunnel_cfg.gro_complete = vxlan_gro_complete;
2927 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
2929 return vs;
2930 }
2932 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
2934 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2935 struct vxlan_sock *vs = NULL;
2936 struct vxlan_dev_node *node;
2938 if (!vxlan->cfg.no_share) {
2939 spin_lock(&vn->sock_lock);
2940 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2941 vxlan->cfg.dst_port, vxlan->cfg.flags);
2942 if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
2943 spin_unlock(&vn->sock_lock);
2944 return -EBUSY;
2945 }
2946 spin_unlock(&vn->sock_lock);
2947 }
2948 if (!vs)
2949 vs = vxlan_socket_create(vxlan->net, ipv6,
2950 vxlan->cfg.dst_port, vxlan->cfg.flags);
2951 if (IS_ERR(vs))
2952 return PTR_ERR(vs);
2953 #if IS_ENABLED(CONFIG_IPV6)
2954 if (ipv6) {
2955 rcu_assign_pointer(vxlan->vn6_sock, vs);
2956 node = &vxlan->hlist6;
2957 } else
2958 #endif
2959 {
2960 rcu_assign_pointer(vxlan->vn4_sock, vs);
2961 node = &vxlan->hlist4;
2962 }
2963 vxlan_vs_add_dev(vs, vxlan, node);
2964 return 0;
2965 }
2967 static int vxlan_sock_add(struct vxlan_dev *vxlan)
2969 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
2970 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
2971 bool ipv4 = !ipv6 || metadata;
2972 int ret = 0;
2974 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
2975 #if IS_ENABLED(CONFIG_IPV6)
2976 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
2977 if (ipv6) {
2978 ret = __vxlan_sock_add(vxlan, true);
2979 if (ret < 0 && ret != -EAFNOSUPPORT)
2980 ipv4 = false;
2981 }
2982 #endif
2983 if (ipv4)
2984 ret = __vxlan_sock_add(vxlan, false);
2985 if (ret < 0)
2986 vxlan_sock_release(vxlan);
2987 return ret;
2988 }
2990 static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
2991 struct net_device **lower,
2992 struct vxlan_dev *old,
2993 struct netlink_ext_ack *extack)
2995 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2996 struct vxlan_dev *tmp;
2997 bool use_ipv6 = false;
2999 if (conf->flags & VXLAN_F_GPE) {
3000 /* For now, allow GPE only together with
3001 * COLLECT_METADATA. This can be relaxed later; in such
3002 * case, the other side of the PtP link will have to be
3003 * provided.
3004 */
3005 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
3006 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3007 NL_SET_ERR_MSG(extack,
3008 "VXLAN GPE does not support this combination of attributes");
3009 return -EINVAL;
3010 }
3011 }
3013 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
3014 /* Unless IPv6 is explicitly requested, assume IPv4 */
3015 conf->remote_ip.sa.sa_family = AF_INET;
3016 conf->saddr.sa.sa_family = AF_INET;
3017 } else if (!conf->remote_ip.sa.sa_family) {
3018 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
3019 } else if (!conf->saddr.sa.sa_family) {
3020 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
3021 }
3023 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
3024 NL_SET_ERR_MSG(extack,
3025 "Local and remote address must be from the same family");
3029 if (vxlan_addr_multicast(&conf->saddr)) {
3030 NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
3034 if (conf->saddr.sa.sa_family == AF_INET6) {
3035 if (!IS_ENABLED(CONFIG_IPV6)) {
3036 NL_SET_ERR_MSG(extack,
3037 "IPv6 support not enabled in the kernel");
3038 return -EPFNOSUPPORT;
3039 }
3040 use_ipv6 = true;
3041 conf->flags |= VXLAN_F_IPV6;
3043 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3044 int local_type =
3045 ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
3046 int remote_type =
3047 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
3049 if (local_type & IPV6_ADDR_LINKLOCAL) {
3050 if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
3051 (remote_type != IPV6_ADDR_ANY)) {
3052 NL_SET_ERR_MSG(extack,
3053 "Invalid combination of local and remote address scopes");
3057 conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
3060 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
3061 NL_SET_ERR_MSG(extack,
3062 "Invalid combination of local and remote address scopes");
3066 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
3071 if (conf->label && !use_ipv6) {
3072 NL_SET_ERR_MSG(extack,
3073 "Label attribute only applies to IPv6 VXLAN devices");
3074 return -EINVAL;
3075 }
3077 if (conf->remote_ifindex) {
3078 struct net_device *lowerdev;
3080 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
3081 if (!lowerdev) {
3082 NL_SET_ERR_MSG(extack,
3083 "Invalid local interface, device not found");
3084 return -ENODEV;
3085 }
3087 #if IS_ENABLED(CONFIG_IPV6)
3088 if (conf->flags & VXLAN_F_IPV6) {
3089 struct inet6_dev *idev = __in6_dev_get(lowerdev);
3090 if (idev && idev->cnf.disable_ipv6) {
3091 NL_SET_ERR_MSG(extack,
3092 "IPv6 support disabled by administrator");
3093 return -EPERM;
3094 }
3095 }
3096 #endif
3098 *lower = lowerdev;
3099 } else {
3100 if (vxlan_addr_multicast(&conf->remote_ip)) {
3101 NL_SET_ERR_MSG(extack,
3102 "Local interface required for multicast remote destination");
3103 return -EINVAL;
3104 }
3107 #if IS_ENABLED(CONFIG_IPV6)
3108 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
3109 NL_SET_ERR_MSG(extack,
3110 "Local interface required for link-local local/remote addresses");
3111 return -EINVAL;
3112 }
3113 #endif
3115 *lower = NULL;
3116 }
3118 if (!conf->dst_port) {
3119 if (conf->flags & VXLAN_F_GPE)
3120 conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
3122 conf->dst_port = htons(vxlan_port);
3125 if (!conf->age_interval)
3126 conf->age_interval = FDB_AGE_DEFAULT;
3128 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3129 if (tmp == old)
3130 continue;
3132 if (tmp->cfg.vni != conf->vni)
3133 continue;
3134 if (tmp->cfg.dst_port != conf->dst_port)
3135 continue;
3136 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
3137 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
3138 continue;
3140 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
3141 tmp->cfg.remote_ifindex != conf->remote_ifindex)
3142 continue;
3144 NL_SET_ERR_MSG(extack,
3145 "A VXLAN device with the specified VNI already exists");
3146 return -EEXIST;
3147 }
3149 return 0;
3150 }
3152 static void vxlan_config_apply(struct net_device *dev,
3153 struct vxlan_config *conf,
3154 struct net_device *lowerdev,
3155 struct net *src_net,
3156 bool changelink)
3158 struct vxlan_dev *vxlan = netdev_priv(dev);
3159 struct vxlan_rdst *dst = &vxlan->default_dst;
3160 unsigned short needed_headroom = ETH_HLEN;
3161 bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
3162 int max_mtu = ETH_MAX_MTU;
3164 if (!changelink) {
3165 if (conf->flags & VXLAN_F_GPE)
3166 vxlan_raw_setup(dev);
3167 else
3168 vxlan_ether_setup(dev);
3170 if (conf->mtu)
3171 dev->mtu = conf->mtu;
3173 vxlan->net = src_net;
3174 }
3176 dst->remote_vni = conf->vni;
3178 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
3180 if (lowerdev) {
3181 dst->remote_ifindex = conf->remote_ifindex;
3183 dev->gso_max_size = lowerdev->gso_max_size;
3184 dev->gso_max_segs = lowerdev->gso_max_segs;
3186 needed_headroom = lowerdev->hard_header_len;
3187 needed_headroom += lowerdev->needed_headroom;
3189 dev->needed_tailroom = lowerdev->needed_tailroom;
3191 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3192 VXLAN_HEADROOM);
3193 if (max_mtu < ETH_MIN_MTU)
3194 max_mtu = ETH_MIN_MTU;
3196 if (!changelink && !conf->mtu)
3197 dev->mtu = max_mtu;
3198 }
3200 if (dev->mtu > max_mtu)
3201 dev->mtu = max_mtu;
3203 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
3204 needed_headroom += VXLAN6_HEADROOM;
3205 else
3206 needed_headroom += VXLAN_HEADROOM;
3207 dev->needed_headroom = needed_headroom;
3209 memcpy(&vxlan->cfg, conf, sizeof(*conf));
3212 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
3213 struct vxlan_config *conf, bool changelink,
3214 struct netlink_ext_ack *extack)
3216 struct vxlan_dev *vxlan = netdev_priv(dev);
3217 struct net_device *lowerdev;
3218 int ret;
3220 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
3221 if (ret)
3222 return ret;
3224 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
3226 return 0;
3227 }
3229 static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3230 struct vxlan_config *conf,
3231 struct netlink_ext_ack *extack)
3233 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3234 struct vxlan_dev *vxlan = netdev_priv(dev);
3235 struct vxlan_fdb *f = NULL;
3236 bool unregister = false;
3237 int err;
3239 err = vxlan_dev_configure(net, dev, conf, false, extack);
3240 if (err)
3241 return err;
3243 dev->ethtool_ops = &vxlan_ethtool_ops;
3245 /* create an fdb entry for a valid default destination */
3246 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
3247 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3248 &vxlan->default_dst.remote_ip,
3249 NUD_REACHABLE | NUD_PERMANENT,
3250 vxlan->cfg.dst_port,
3251 vxlan->default_dst.remote_vni,
3252 vxlan->default_dst.remote_vni,
3253 vxlan->default_dst.remote_ifindex,
3254 NTF_SELF, &f);
3255 if (err)
3256 return err;
3257 }
3259 err = register_netdevice(dev);
3260 if (err)
3261 goto errout;
3262 unregister = true;
3264 err = rtnl_configure_link(dev, NULL);
3265 if (err)
3266 goto errout;
3268 /* notify default fdb entry */
3269 if (f)
3270 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3272 list_add(&vxlan->next, &vn->vxlan_list);
3273 return 0;
3275 errout:
3276 /* unregister_netdevice() destroys the default FDB entry with deletion
3277 * notification. But the addition notification was not sent yet, so
3278 * destroy the entry by hand here.
3280 if (f)
3281 vxlan_fdb_destroy(vxlan, f, false);
3282 if (unregister)
3283 unregister_netdevice(dev);
3284 return err;
3285 }
3287 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
3288 struct net_device *dev, struct vxlan_config *conf,
3289 bool changelink)
3291 struct vxlan_dev *vxlan = netdev_priv(dev);
3293 memset(conf, 0, sizeof(*conf));
3295 /* if changelink operation, start with old existing cfg */
3296 if (changelink)
3297 memcpy(conf, &vxlan->cfg, sizeof(*conf));
3299 if (data[IFLA_VXLAN_ID]) {
3300 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3302 if (changelink && (vni != conf->vni))
3303 return -EOPNOTSUPP;
3304 conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3307 if (data[IFLA_VXLAN_GROUP]) {
3308 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
3309 return -EOPNOTSUPP;
3311 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
3312 conf->remote_ip.sa.sa_family = AF_INET;
3313 } else if (data[IFLA_VXLAN_GROUP6]) {
3314 if (!IS_ENABLED(CONFIG_IPV6))
3315 return -EPFNOSUPPORT;
3317 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
3318 return -EOPNOTSUPP;
3320 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
3321 conf->remote_ip.sa.sa_family = AF_INET6;
3324 if (data[IFLA_VXLAN_LOCAL]) {
3325 if (changelink && (conf->saddr.sa.sa_family != AF_INET))
3326 return -EOPNOTSUPP;
3328 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
3329 conf->saddr.sa.sa_family = AF_INET;
3330 } else if (data[IFLA_VXLAN_LOCAL6]) {
3331 if (!IS_ENABLED(CONFIG_IPV6))
3332 return -EPFNOSUPPORT;
3334 if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
3335 return -EOPNOTSUPP;
3337 /* TODO: respect scope id */
3338 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
3339 conf->saddr.sa.sa_family = AF_INET6;
3342 if (data[IFLA_VXLAN_LINK])
3343 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
3345 if (data[IFLA_VXLAN_TOS])
3346 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
3348 if (data[IFLA_VXLAN_TTL])
3349 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
3351 if (data[IFLA_VXLAN_TTL_INHERIT]) {
3354 conf->flags |= VXLAN_F_TTL_INHERIT;
3357 if (data[IFLA_VXLAN_LABEL])
3358 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
3359 IPV6_FLOWLABEL_MASK;
3361 if (data[IFLA_VXLAN_LEARNING]) {
3362 if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
3363 conf->flags |= VXLAN_F_LEARN;
3365 conf->flags &= ~VXLAN_F_LEARN;
3366 } else if (!changelink) {
3367 /* default to learn on a new device */
3368 conf->flags |= VXLAN_F_LEARN;
3371 if (data[IFLA_VXLAN_AGEING]) {
3374 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
3377 if (data[IFLA_VXLAN_PROXY]) {
3380 if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
3381 conf->flags |= VXLAN_F_PROXY;
3384 if (data[IFLA_VXLAN_RSC]) {
3387 if (nla_get_u8(data[IFLA_VXLAN_RSC]))
3388 conf->flags |= VXLAN_F_RSC;
3391 if (data[IFLA_VXLAN_L2MISS]) {
3394 if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
3395 conf->flags |= VXLAN_F_L2MISS;
3398 if (data[IFLA_VXLAN_L3MISS]) {
3401 if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
3402 conf->flags |= VXLAN_F_L3MISS;
3405 if (data[IFLA_VXLAN_LIMIT]) {
3408 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3411 if (data[IFLA_VXLAN_COLLECT_METADATA]) {
3414 if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
3415 conf->flags |= VXLAN_F_COLLECT_METADATA;
3418 if (data[IFLA_VXLAN_PORT_RANGE]) {
3420 const struct ifla_vxlan_port_range *p
3421 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3422 conf->port_min = ntohs(p->low);
3423 conf->port_max = ntohs(p->high);
3429 if (data[IFLA_VXLAN_PORT]) {
3432 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3435 if (data[IFLA_VXLAN_UDP_CSUM]) {
3438 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3439 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3442 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
3445 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
3446 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
3449 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
3452 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
3453 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
3456 if (data[IFLA_VXLAN_REMCSUM_TX]) {
3459 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
3460 conf->flags |= VXLAN_F_REMCSUM_TX;
3463 if (data[IFLA_VXLAN_REMCSUM_RX]) {
3466 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
3467 conf->flags |= VXLAN_F_REMCSUM_RX;
3470 if (data[IFLA_VXLAN_GBP]) {
3473 conf->flags |= VXLAN_F_GBP;
3476 if (data[IFLA_VXLAN_GPE]) {
3479 conf->flags |= VXLAN_F_GPE;
3482 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
3485 conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3486 }
3488 if (tb[IFLA_MTU]) {
3489 if (changelink)
3490 return -EOPNOTSUPP;
3491 conf->mtu = nla_get_u32(tb[IFLA_MTU]);
3492 }
3494 return 0;
3495 }
3497 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3498 struct nlattr *tb[], struct nlattr *data[],
3499 struct netlink_ext_ack *extack)
3501 struct vxlan_config conf;
3502 int err;
3504 err = vxlan_nl2conf(tb, data, dev, &conf, false);
3505 if (err)
3506 return err;
3508 return __vxlan_dev_create(src_net, dev, &conf, extack);
3509 }
3511 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3512 struct nlattr *data[],
3513 struct netlink_ext_ack *extack)
3515 struct vxlan_dev *vxlan = netdev_priv(dev);
3516 struct vxlan_rdst *dst = &vxlan->default_dst;
3517 struct vxlan_rdst old_dst;
3518 struct vxlan_config conf;
3519 int err;
3521 err = vxlan_nl2conf(tb, data,
3522 dev, &conf, true);
3523 if (err)
3524 return err;
3526 memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
3528 err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
3529 if (err)
3530 return err;
3532 /* handle default dst entry */
3533 if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
3534 spin_lock_bh(&vxlan->hash_lock);
3535 if (!vxlan_addr_any(&old_dst.remote_ip))
3536 __vxlan_fdb_delete(vxlan, all_zeros_mac,
3537 old_dst.remote_ip,
3538 vxlan->cfg.dst_port,
3539 old_dst.remote_vni,
3540 old_dst.remote_vni,
3541 old_dst.remote_ifindex, 0);
3543 if (!vxlan_addr_any(&dst->remote_ip)) {
3544 err = vxlan_fdb_update(vxlan, all_zeros_mac,
3545 &dst->remote_ip,
3546 NUD_REACHABLE | NUD_PERMANENT,
3547 NLM_F_APPEND | NLM_F_CREATE,
3548 vxlan->cfg.dst_port,
3549 dst->remote_vni,
3550 dst->remote_vni,
3551 dst->remote_ifindex,
3552 NTF_SELF);
3553 if (err) {
3554 spin_unlock_bh(&vxlan->hash_lock);
3555 return err;
3556 }
3557 }
3558 spin_unlock_bh(&vxlan->hash_lock);
3559 }
3561 return 0;
3562 }
3564 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3566 struct vxlan_dev *vxlan = netdev_priv(dev);
3568 vxlan_flush(vxlan, true);
3570 list_del(&vxlan->next);
3571 unregister_netdevice_queue(dev, head);
3574 static size_t vxlan_get_size(const struct net_device *dev)
3577 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3578 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3579 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3580 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3581 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3582 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
3583 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3584 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3585 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3586 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3587 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3588 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3589 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3590 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3591 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3592 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3593 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
3594 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3595 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
3596 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
3597 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
3598 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
3599 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
3600 0;
3601 }
3603 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3605 const struct vxlan_dev *vxlan = netdev_priv(dev);
3606 const struct vxlan_rdst *dst = &vxlan->default_dst;
3607 struct ifla_vxlan_port_range ports = {
3608 .low = htons(vxlan->cfg.port_min),
3609 .high = htons(vxlan->cfg.port_max),
3610 };
3612 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
3613 goto nla_put_failure;
3615 if (!vxlan_addr_any(&dst->remote_ip)) {
3616 if (dst->remote_ip.sa.sa_family == AF_INET) {
3617 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
3618 dst->remote_ip.sin.sin_addr.s_addr))
3619 goto nla_put_failure;
3620 #if IS_ENABLED(CONFIG_IPV6)
3621 } else {
3622 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
3623 &dst->remote_ip.sin6.sin6_addr))
3624 goto nla_put_failure;
3625 #endif
3626 }
3627 }
3629 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
3630 goto nla_put_failure;
3632 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
3633 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
3634 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
3635 vxlan->cfg.saddr.sin.sin_addr.s_addr))
3636 goto nla_put_failure;
3637 #if IS_ENABLED(CONFIG_IPV6)
3638 } else {
3639 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
3640 &vxlan->cfg.saddr.sin6.sin6_addr))
3641 goto nla_put_failure;
3642 #endif
3643 }
3644 }
3646 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3647 nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
3648 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
3649 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3650 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3651 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
3652 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
3653 nla_put_u8(skb, IFLA_VXLAN_PROXY,
3654 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
3655 nla_put_u8(skb, IFLA_VXLAN_RSC,
3656 !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
3657 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
3658 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
3659 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
3660 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
3661 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
3662 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
3663 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
3664 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
3665 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
3666 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
3667 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
3668 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3669 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
3670 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3671 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
3672 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
3673 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
3674 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
3675 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
3676 goto nla_put_failure;
3678 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
3679 goto nla_put_failure;
3681 if (vxlan->cfg.flags & VXLAN_F_GBP &&
3682 nla_put_flag(skb, IFLA_VXLAN_GBP))
3683 goto nla_put_failure;
3685 if (vxlan->cfg.flags & VXLAN_F_GPE &&
3686 nla_put_flag(skb, IFLA_VXLAN_GPE))
3687 goto nla_put_failure;
3689 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3690 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3691 goto nla_put_failure;
3693 return 0;
3695 nla_put_failure:
3696 return -EMSGSIZE;
3697 }
3699 static struct net *vxlan_get_link_net(const struct net_device *dev)
3701 struct vxlan_dev *vxlan = netdev_priv(dev);
3703 return vxlan->net;
3704 }
3706 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3707 .kind = "vxlan",
3708 .maxtype = IFLA_VXLAN_MAX,
3709 .policy = vxlan_policy,
3710 .priv_size = sizeof(struct vxlan_dev),
3711 .setup = vxlan_setup,
3712 .validate = vxlan_validate,
3713 .newlink = vxlan_newlink,
3714 .changelink = vxlan_changelink,
3715 .dellink = vxlan_dellink,
3716 .get_size = vxlan_get_size,
3717 .fill_info = vxlan_fill_info,
3718 .get_link_net = vxlan_get_link_net,
3721 struct net_device *vxlan_dev_create(struct net *net, const char *name,
3722 u8 name_assign_type,
3723 struct vxlan_config *conf)
3725 struct nlattr *tb[IFLA_MAX + 1];
3726 struct net_device *dev;
3727 int err;
3729 memset(&tb, 0, sizeof(tb));
3731 dev = rtnl_create_link(net, name, name_assign_type,
3732 &vxlan_link_ops, tb);
3733 if (IS_ERR(dev))
3734 return dev;
3736 err = __vxlan_dev_create(net, dev, conf, NULL);
3737 if (err < 0) {
3738 free_netdev(dev);
3739 return ERR_PTR(err);
3740 }
3742 err = rtnl_configure_link(dev, NULL);
3743 if (err < 0) {
3744 LIST_HEAD(list_kill);
3746 vxlan_dellink(dev, &list_kill);
3747 unregister_netdevice_many(&list_kill);
3748 return ERR_PTR(err);
3749 }
3751 return dev;
3752 }
3753 EXPORT_SYMBOL_GPL(vxlan_dev_create);
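/* Illustrative sketch (hypothetical, not part of this driver): how another
 * kernel module might use the exported vxlan_dev_create() to instantiate a
 * metadata-based (external control) VXLAN device. The device name and the
 * helper name are made up; the caller is assumed to hold the rtnl lock, and
 * the return value must be checked with IS_ERR() as in the function above.
 */
static struct net_device __maybe_unused *example_create_metadata_vxlan(struct net *net)
{
	struct vxlan_config conf = {
		.dst_port = htons(4789),		/* IANA-assigned VXLAN port */
		.flags	  = VXLAN_F_COLLECT_METADATA,	/* per-packet tunnel metadata */
	};

	return vxlan_dev_create(net, "vxlan_ex0", NET_NAME_USER, &conf);
}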
3755 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3756 struct net_device *dev)
3758 struct vxlan_dev *vxlan, *next;
3759 LIST_HEAD(list_kill);
3761 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3762 struct vxlan_rdst *dst = &vxlan->default_dst;
3764 /* In case we created vxlan device with carrier
3765 * and we loose the carrier due to module unload
3766 * we also need to remove vxlan device. In other
3767 * cases, it's not necessary and remote_ifindex
3768 * is 0 here, so no matches.
3769 */
3770 if (dst->remote_ifindex == dev->ifindex)
3771 vxlan_dellink(vxlan->dev, &list_kill);
3772 }
3774 unregister_netdevice_many(&list_kill);
3775 }
3777 static int vxlan_netdevice_event(struct notifier_block *unused,
3778 unsigned long event, void *ptr)
3780 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3781 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
3783 if (event == NETDEV_UNREGISTER) {
3784 vxlan_offload_rx_ports(dev, false);
3785 vxlan_handle_lowerdev_unregister(vn, dev);
3786 } else if (event == NETDEV_REGISTER) {
3787 vxlan_offload_rx_ports(dev, true);
3788 } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
3789 event == NETDEV_UDP_TUNNEL_DROP_INFO) {
3790 vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
3791 }
3793 return NOTIFY_DONE;
3794 }
3796 static struct notifier_block vxlan_notifier_block __read_mostly = {
3797 .notifier_call = vxlan_netdevice_event,
3800 static __net_init int vxlan_init_net(struct net *net)
3802 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3803 unsigned int h;
3805 INIT_LIST_HEAD(&vn->vxlan_list);
3806 spin_lock_init(&vn->sock_lock);
3808 for (h = 0; h < PORT_HASH_SIZE; ++h)
3809 INIT_HLIST_HEAD(&vn->sock_list[h]);
3811 return 0;
3812 }
3814 static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
3816 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3817 struct vxlan_dev *vxlan, *next;
3818 struct net_device *dev, *aux;
3820 for_each_netdev_safe(net, dev, aux)
3821 if (dev->rtnl_link_ops == &vxlan_link_ops)
3822 unregister_netdevice_queue(dev, head);
3824 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3825 /* If vxlan->dev is in the same netns, it has already been added
3826 * to the list by the previous loop.
3827 */
3828 if (!net_eq(dev_net(vxlan->dev), net))
3829 unregister_netdevice_queue(vxlan->dev, head);
3834 static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
3836 struct net *net;
3837 LIST_HEAD(list);
3838 unsigned int h;
3840 rtnl_lock();
3841 list_for_each_entry(net, net_list, exit_list)
3842 vxlan_destroy_tunnels(net, &list);
3844 unregister_netdevice_many(&list);
3845 rtnl_unlock();
3847 list_for_each_entry(net, net_list, exit_list) {
3848 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3850 for (h = 0; h < PORT_HASH_SIZE; ++h)
3851 WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
3855 static struct pernet_operations vxlan_net_ops = {
3856 .init = vxlan_init_net,
3857 .exit_batch = vxlan_exit_batch_net,
3858 .id = &vxlan_net_id,
3859 .size = sizeof(struct vxlan_net),
3862 static int __init vxlan_init_module(void)
3864 int rc;
3866 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
3868 rc = register_pernet_subsys(&vxlan_net_ops);
3869 if (rc)
3870 goto out1;
3872 rc = register_netdevice_notifier(&vxlan_notifier_block);
3873 if (rc)
3874 goto out2;
3876 rc = rtnl_link_register(&vxlan_link_ops);
3877 if (rc)
3878 goto out3;
3880 return 0;
3881 out3:
3882 unregister_netdevice_notifier(&vxlan_notifier_block);
3883 out2:
3884 unregister_pernet_subsys(&vxlan_net_ops);
3885 out1:
3886 return rc;
3887 }
3888 late_initcall(vxlan_init_module);
3890 static void __exit vxlan_cleanup_module(void)
3892 rtnl_link_unregister(&vxlan_link_ops);
3893 unregister_netdevice_notifier(&vxlan_notifier_block);
3894 unregister_pernet_subsys(&vxlan_net_ops);
3895 /* rcu_barrier() is called by netns */
3897 module_exit(vxlan_cleanup_module);
3899 MODULE_LICENSE("GPL");
3900 MODULE_VERSION(VXLAN_VERSION);
3901 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
3902 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
3903 MODULE_ALIAS_RTNL_LINK("vxlan");