/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
   (A sketch of this pattern follows this comment.)

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output
     would be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my
     neighbourhood) return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us:
   we did all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route, you are innocent. :-)
 */

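/* Illustration only: a minimal sketch of the xmit_recursion guard
 * described above. All identifiers below are invented for this sketch;
 * the real percpu counter and RECURSION_LIMIT live in the core
 * networking code, not in this file.
 */
#if 0
static DEFINE_PER_CPU(int, sketch_xmit_recursion);
#define SKETCH_RECURSION_LIMIT 8

static int sketch_guarded_xmit(struct sk_buff *skb)
{
        int ret;

        /* CPU migration is disabled inside ndo_start_xmit(), so a plain
         * percpu counter works as a per-task recursion depth here.
         */
        if (__this_cpu_read(sketch_xmit_recursion) > SKETCH_RECURSION_LIMIT) {
                kfree_skb(skb);         /* dead loop detected: drop, don't recurse */
                return -ELOOP;
        }

        __this_cpu_inc(sketch_xmit_recursion);
        ret = dev_queue_xmit(skb);
        __this_cpu_dec(sketch_xmit_recursion);
        return ret;
}
#endif
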
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;

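/* Handle an ICMP error received for a packet we tunneled earlier: find
 * the tunnel the inner addresses belong to and record the error,
 * rate-limited by IPTUNNEL_ERR_TIMEO.
 */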
static void ipgre_err(struct sk_buff *skb, u32 info,
                      const struct tnl_ptk_info *tpi)
{
        /* All the routers (except for Linux) return only
           8 bytes of packet payload. It means that precise relaying of
           ICMP in the real Internet is absolutely infeasible.

           Moreover, Cisco "wise men" put the GRE key in the third word
           of the GRE header. It makes it impossible to maintain even soft
           state for keyed GRE tunnels with checksum enabled. Tell
           them "thank you".

           Well, I wonder, rfc1812 was written by a Cisco employee;
           what the hell, these idiots break standards established
           by themselves???
         */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
        const struct iphdr *iph;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        unsigned int data_len = 0;
        struct ip_tunnel *t;

        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
                return;

        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
                        return;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH,
                           I believe they are just ether pollution. --ANK
                         */
                        break;
                }
                break;

        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
                        return;
                data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
                break;

        case ICMP_REDIRECT:
                break;
        }

        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else
                itn = net_generic(net, ipgre_net_id);

        iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                             iph->daddr, iph->saddr, tpi->key);
        if (!t)
                return;

#if IS_ENABLED(CONFIG_IPV6)
        if (tpi->proto == htons(ETH_P_IPV6) &&
            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
                                        type, data_len))
                return;
#endif

        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
                return;

        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
                return;

        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
}

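/* Demultiplex ICMP errors for IPPROTO_GRE: PMTU and redirect messages
 * are handled directly; everything else is passed on to ipgre_err().
 */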
static void gre_err(struct sk_buff *skb, u32 info)
{
        /* See the comment at the top of ipgre_err() on why precise ICMP
         * relaying is infeasible and why keyed+checksummed GRE makes it
         * worse.
         */
        const struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct tnl_ptk_info tpi;

        if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
                             iph->ihl * 4) < 0)
                return;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
                return;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
                              IPPROTO_GRE, 0);
                return;
        }

        ipgre_err(skb, info, &tpi);
}

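/* Core receive path: look up the tunnel for this packet and, if found,
 * strip the outer headers and hand the skb to ip_tunnel_rcv(). Returns
 * PACKET_RCVD, PACKET_REJECT, or PACKET_NEXT (no matching tunnel).
 */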
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
        struct metadata_dst *tun_dst = NULL;
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;

        iph = ip_hdr(skb);
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                                  iph->saddr, iph->daddr, tpi->key);
        if (tunnel) {
                if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
                                           raw_proto, false) < 0)
                        goto drop;

                if (tunnel->dev->type != ARPHRD_NONE)
                        skb_pop_mac_header(skb);
                else
                        skb_reset_mac_header(skb);
                if (tunnel->collect_md) {
                        __be16 flags;
                        __be64 tun_id;

                        flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
                        tun_id = key32_to_tunnel_id(tpi->key);
                        tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
                        if (!tun_dst)
                                return PACKET_REJECT;
                }

                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
        return PACKET_NEXT;

drop:
        kfree_skb(skb);
        return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                     int hdr_len)
{
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
        int res;

        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
        else
                itn = net_generic(net, ipgre_net_id);

        res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
        if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
                /* ipgre tunnels in collect metadata mode should receive
                 * also ETH_P_TEB traffic.
                 */
                itn = net_generic(net, ipgre_net_id);
                res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
        }
        return res;
}

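/* GRE protocol receive handler, registered below via gre_add_protocol(). */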
static int gre_rcv(struct sk_buff *skb)
{
        struct tnl_ptk_info tpi;
        bool csum_err = false;
        int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
                /* Looped back packet, drop it! */
                if (rt_is_output_route(skb_rtable(skb)))
                        goto drop;
        }
#endif

        hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
        if (hdr_len < 0)
                goto drop;

        if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
        kfree_skb(skb);
        return 0;
}

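/* Build the GRE header in front of the inner packet, then hand the skb
 * to the generic IP tunnel transmit path for the outer IP header.
 */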
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                       const struct iphdr *tnl_params,
                       __be16 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        __be16 flags = tunnel->parms.o_flags;

        /* Push GRE header. */
        gre_build_header(skb, tunnel->tun_hlen,
                         flags, proto, tunnel->parms.o_key,
                         (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
        if (csum && skb_checksum_start(skb) < skb->data)
                return -EINVAL;
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
                                 struct net_device *dev,
                                 struct flowi4 *fl,
                                 const struct ip_tunnel_key *key)
{
        struct net *net = dev_net(dev);

        memset(fl, 0, sizeof(*fl));
        fl->daddr = key->u.ipv4.dst;
        fl->saddr = key->u.ipv4.src;
        fl->flowi4_tos = RT_TOS(key->tos);
        fl->flowi4_mark = skb->mark;
        fl->flowi4_proto = IPPROTO_GRE;

        return ip_route_output_key(net, fl);
}

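/* Flow-based ("external"/collect_md) transmit: all tunnel parameters
 * come from the per-skb metadata dst rather than from the device.
 */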
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
                        __be16 proto)
{
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        struct rtable *rt = NULL;
        struct flowi4 fl;
        int min_headroom;
        int tunnel_hlen;
        __be16 df, flags;
        bool use_cache;
        int err;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto err_free_skb;

        key = &tun_info->key;
        use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
        if (use_cache)
                rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
        if (!rt) {
                rt = gre_get_rt(skb, dev, &fl, key);
                if (IS_ERR(rt))
                        goto err_free_skb;
                if (use_cache)
                        dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
                                          fl.saddr);
        }

        tunnel_hlen = gre_calc_hlen(key->tun_flags);

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb));
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        /* Push Tunnel header. */
        if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
                goto err_free_rt;

        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
        gre_build_header(skb, tunnel_hlen, flags, proto,
                         tunnel_id_to_key32(tun_info->key.tun_id), 0);

        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

        iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
                      key->tos, key->ttl, df, false);
        return;

err_free_rt:
        ip_rt_put(rt);
err_free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
}

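/* ndo_fill_metadata_dst: resolve the route a flow-based transmit would
 * use and report the chosen source address back to the caller.
 */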
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
        struct rtable *rt;
        struct flowi4 fl4;

        if (ip_tunnel_info_af(info) != AF_INET)
                return -EINVAL;

        rt = gre_get_rt(skb, dev, &fl4, &info->key);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        ip_rt_put(rt);
        info->key.u.ipv4.src = fl4.saddr;
        return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                              struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *tnl_params;

        if (tunnel->collect_md) {
                gre_fb_xmit(skb, dev, skb->protocol);
                return NETDEV_TX_OK;
        }

        if (dev->header_ops) {
                /* Need space for new headers */
                if (skb_cow_head(skb, dev->needed_headroom -
                                      (tunnel->hlen + sizeof(struct iphdr))))
                        goto free_skb;

                tnl_params = (const struct iphdr *)skb->data;

                /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
                 * to gre header.
                 */
                skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
                skb_reset_mac_header(skb);
        } else {
                if (skb_cow_head(skb, dev->needed_headroom))
                        goto free_skb;

                tnl_params = &tunnel->parms.iph;
        }

        if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
                goto free_skb;

        __gre_xmit(skb, dev, tnl_params, skb->protocol);
        return NETDEV_TX_OK;

free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

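/* ndo_start_xmit for "gretap" (Ethernet over GRE) devices: the inner
 * frame keeps its MAC header and is sent with protocol ETH_P_TEB.
 */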
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
                                struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        if (tunnel->collect_md) {
                gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
                return NETDEV_TX_OK;
        }

        if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
                goto free_skb;

        if (skb_cow_head(skb, dev->needed_headroom))
                goto free_skb;

        __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
        return NETDEV_TX_OK;

free_skb:
        kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

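/* Legacy SIOCADDTUNNEL/SIOCCHGTUNNEL/SIOCDELTUNNEL interface; flags are
 * converted between wire (GRE_*) and internal (TUNNEL_*) representations
 * around the generic ip_tunnel_ioctl() call.
 */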
static int ipgre_tunnel_ioctl(struct net_device *dev,
                              struct ifreq *ifr, int cmd)
{
        int err;
        struct ip_tunnel_parm p;

        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                return -EFAULT;
        if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
                    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
                    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
                        return -EINVAL;
        }

        p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
        p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

        err = ip_tunnel_ioctl(dev, &p, cmd);
        if (err)
                return err;

        p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
        p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */

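/* A modern iproute2 equivalent for a plain point-to-point tunnel
 * (illustrative only; substitute your own addresses):
 *
 *   ip link add gre1 type gre remote 192.0.2.2 local 192.0.2.1 ttl 64
 *   ip addr add 10.0.0.1/30 dev gre1
 *   ip link set gre1 up
 *
 * The Ethernet-carrying flavour registered below as "gretap" is created
 * the same way with "type gretap".
 */
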
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
                        const void *daddr, const void *saddr, unsigned int len)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct iphdr *iph;
        struct gre_base_hdr *greh;

        iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
        greh = (struct gre_base_hdr *)(iph + 1);
        greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
        greh->protocol = htons(type);

        memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

        /* Set the source hardware address. */
        if (saddr)
                memcpy(&iph->saddr, saddr, 4);
        if (daddr)
                memcpy(&iph->daddr, daddr, 4);
        if (iph->daddr)
                return t->hlen + sizeof(*iph);

        return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
        const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

        memcpy(haddr, &iph->saddr, 4);
        return 4;
}

static const struct header_ops ipgre_header_ops = {
        .create = ipgre_header,
        .parse  = ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr)) {
                struct flowi4 fl4;
                struct rtable *rt;

                rt = ip_route_output_gre(t->net, &fl4,
                                         t->parms.iph.daddr,
                                         t->parms.iph.saddr,
                                         t->parms.o_key,
                                         RT_TOS(t->parms.iph.tos),
                                         t->parms.link);
                if (IS_ERR(rt))
                        return -EADDRNOTAVAIL;
                dev = rt->dst.dev;
                ip_rt_put(rt);
                if (!__in_dev_get_rtnl(dev))
                        return -EADDRNOTAVAIL;
                t->mlink = dev->ifindex;
                ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
        }
        return 0;
}

static int ipgre_close(struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);

        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;

                in_dev = inetdev_by_index(t->net, t->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
        }
        return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
        .ndo_init               = ipgre_tunnel_init,
        .ndo_uninit             = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
        .ndo_open               = ipgre_open,
        .ndo_stop               = ipgre_close,
#endif
        .ndo_start_xmit         = ipgre_xmit,
        .ndo_do_ioctl           = ipgre_tunnel_ioctl,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
};

#define GRE_FEATURES    (NETIF_F_SG |           \
                         NETIF_F_FRAGLIST |     \
                         NETIF_F_HIGHDMA |      \
                         NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops = &ipgre_netdev_ops;
        dev->type       = ARPHRD_IPGRE;
        ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel;
        int t_hlen;

        tunnel = netdev_priv(dev);
        tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
        tunnel->parms.iph.protocol = IPPROTO_GRE;

        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

        t_hlen = tunnel->hlen + sizeof(struct iphdr);

        dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
        dev->mtu             = ETH_DATA_LEN - t_hlen - 4;

        dev->features    |= GRE_FEATURES;
        dev->hw_features |= GRE_FEATURES;

        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
                /* TCP offload with GRE SEQ is not supported, nor
                 * can we support 2 levels of outer headers requiring
                 * an update.
                 */
                if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
                    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
                        dev->features    |= NETIF_F_GSO_SOFTWARE;
                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
                }

                /* Can use a lockless transmit, unless we generate
                 * output sequences
                 */
                dev->features |= NETIF_F_LLTX;
        }
}

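/* The sizing in __gre_tunnel_init() above, worked through for one case
 * (illustrative): with TUNNEL_CSUM, TUNNEL_KEY and TUNNEL_SEQ all set,
 * gre_calc_hlen() returns the 4-byte base header plus 4 bytes each for
 * csum, key and seq, i.e. tun_hlen = 16. With no encap configured,
 * t_hlen = 16 + 20 (outer IPv4) = 36, so the default mtu becomes
 * 1500 - 36 - 4 = 1460.
 */
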
static int ipgre_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        __gre_tunnel_init(dev);

        memcpy(dev->dev_addr, &iph->saddr, 4);
        memcpy(dev->broadcast, &iph->daddr, 4);

        dev->flags = IFF_NOARP;
        netif_keep_dst(dev);
        dev->addr_len = 4;

        if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
                                return -EINVAL;
                        dev->flags = IFF_BROADCAST;
                        dev->header_ops = &ipgre_header_ops;
                }
#endif
        } else if (!tunnel->collect_md) {
                dev->header_ops = &ipgre_header_ops;
        }

        return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
        .handler     = gre_rcv,
        .err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
        return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
        struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

        ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
        .init = ipgre_init_net,
        .exit = ipgre_exit_net,
        .id   = &ipgre_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
        __be16 flags;

        if (!data)
                return 0;

        flags = 0;
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (flags & (GRE_VERSION | GRE_ROUTING))
                return -EINVAL;

        if (data[IFLA_GRE_COLLECT_METADATA] &&
            data[IFLA_GRE_ENCAP_TYPE] &&
            nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
                return -EINVAL;

        return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
        __be32 daddr;

        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }

        if (!data)
                goto out;

        if (data[IFLA_GRE_REMOTE]) {
                memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
                if (!daddr)
                        return -EINVAL;
        }

out:
        return ipgre_tunnel_validate(tb, data);
}

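/* Translate IFLA_GRE_* netlink attributes into ip_tunnel_parm and the
 * per-tunnel flags (collect_md, ignore_df).
 */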
static int ipgre_netlink_parms(struct net_device *dev,
                               struct nlattr *data[],
                               struct nlattr *tb[],
                               struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *t = netdev_priv(dev);

        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_GRE;

        if (!data)
                return 0;

        if (data[IFLA_GRE_LINK])
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

        if (data[IFLA_GRE_IFLAGS])
                parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

        if (data[IFLA_GRE_OFLAGS])
                parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

        if (data[IFLA_GRE_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

        if (data[IFLA_GRE_LOCAL])
                parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

        if (data[IFLA_GRE_REMOTE])
                parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

        if (data[IFLA_GRE_TTL])
                parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

        if (data[IFLA_GRE_TOS])
                parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

        if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
                if (t->ignore_df)
                        return -EINVAL;
                parms->iph.frag_off = htons(IP_DF);
        }

        if (data[IFLA_GRE_COLLECT_METADATA]) {
                t->collect_md = true;
                if (dev->type == ARPHRD_IPGRE)
                        dev->type = ARPHRD_NONE;
        }

        if (data[IFLA_GRE_IGNORE_DF]) {
                if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
                    && (parms->iph.frag_off & htons(IP_DF)))
                        return -EINVAL;
                t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
        }

        return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
                                      struct ip_tunnel_encap *ipencap)
{
        bool ret = false;

        memset(ipencap, 0, sizeof(*ipencap));

        if (!data)
                return ret;

        if (data[IFLA_GRE_ENCAP_TYPE]) {
                ret = true;
                ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
        }

        if (data[IFLA_GRE_ENCAP_FLAGS]) {
                ret = true;
                ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
        }

        if (data[IFLA_GRE_ENCAP_SPORT]) {
                ret = true;
                ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
        }

        if (data[IFLA_GRE_ENCAP_DPORT]) {
                ret = true;
                ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
        }

        return ret;
}

static int gre_tap_init(struct net_device *dev)
{
        __gre_tunnel_init(dev);
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
        .ndo_init               = gre_tap_init,
        .ndo_uninit             = ip_tunnel_uninit,
        .ndo_start_xmit         = gre_tap_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = ip_tunnel_change_mtu,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
        .ndo_get_iflink         = ip_tunnel_get_iflink,
        .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->netdev_ops  = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
{
        struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
        int err;

        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);

                err = ip_tunnel_encap_setup(t, &ipencap);
                if (err < 0)
                        return err;
        }

        err = ipgre_netlink_parms(dev, data, tb, &p);
        if (err < 0)
                return err;
        return ip_tunnel_newlink(dev, tb, &p);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[])
{
        struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
        int err;

        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);

                err = ip_tunnel_encap_setup(t, &ipencap);
                if (err < 0)
                        return err;
        }

        err = ipgre_netlink_parms(dev, data, tb, &p);
        if (err < 0)
                return err;
        return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(4) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(4) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_TOS */
                nla_total_size(1) +
                /* IFLA_GRE_PMTUDISC */
                nla_total_size(1) +
                /* IFLA_GRE_ENCAP_TYPE */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_FLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_SPORT */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
                /* IFLA_GRE_COLLECT_METADATA */
                nla_total_size(0) +
                /* IFLA_GRE_IGNORE_DF */
                nla_total_size(1) +
                0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
                         gre_tnl_flags_to_gre_flags(p->o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
            nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
            nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
            nla_put_u8(skb, IFLA_GRE_PMTUDISC,
                       !!(p->iph.frag_off & htons(IP_DF))))
                goto nla_put_failure;

        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
                         t->encap.sport) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
                         t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;

        if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
                goto nla_put_failure;

        if (t->collect_md) {
                if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]             = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]           = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]           = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]             = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]             = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]            = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
        [IFLA_GRE_REMOTE]           = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
        [IFLA_GRE_TTL]              = { .type = NLA_U8 },
        [IFLA_GRE_TOS]              = { .type = NLA_U8 },
        [IFLA_GRE_PMTUDISC]         = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_TYPE]       = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT]      = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT]      = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
        [IFLA_GRE_IGNORE_DF]        = { .type = NLA_U8 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
        .kind           = "gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = ipgre_tunnel_setup,
        .validate       = ipgre_tunnel_validate,
        .newlink        = ipgre_newlink,
        .changelink     = ipgre_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
        .kind           = "gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = ipgre_tap_setup,
        .validate       = ipgre_tap_validate,
        .newlink        = ipgre_newlink,
        .changelink     = ipgre_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
};

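/* Create a flow-based (collect_md) gretap device for callers such as
 * openvswitch that manage tunnel metadata themselves.
 */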
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                        u8 name_assign_type)
{
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
        LIST_HEAD(list_kill);
        struct ip_tunnel *t;
        int err;

        memset(&tb, 0, sizeof(tb));

        dev = rtnl_create_link(net, name, name_assign_type,
                               &ipgre_tap_ops, tb);
        if (IS_ERR(dev))
                return dev;

        /* Configure flow based GRE device. */
        t = netdev_priv(dev);
        t->collect_md = true;

        err = ipgre_newlink(net, dev, tb, NULL);
        if (err < 0) {
                free_netdev(dev);
                return ERR_PTR(err);
        }

        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
         */
        err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
        if (err)
                goto out;

        err = rtnl_configure_link(dev, NULL);
        if (err < 0)
                goto out;

        return dev;
out:
        ip_tunnel_dellink(dev, &list_kill);
        unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
        return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
        struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

        ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
        .init = ipgre_tap_init_net,
        .exit = ipgre_tap_exit_net,
        .id   = &gre_tap_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

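/* Module init: register per-netns state, the GRE protocol hook and both
 * rtnl link types; unwind in reverse order on any failure.
 */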
static int __init ipgre_init(void)
{
        int err;

        pr_info("GRE over IPv4 tunneling driver\n");

        err = register_pernet_device(&ipgre_net_ops);
        if (err < 0)
                return err;

        err = register_pernet_device(&ipgre_tap_net_ops);
        if (err < 0)
                goto pnet_tap_failed;

        err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ipgre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ipgre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        return 0;

tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
        gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
        unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
        unregister_pernet_device(&ipgre_net_ops);
        return err;
}

static void __exit ipgre_fini(void)
{
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
        gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
        unregister_pernet_device(&ipgre_tap_net_ops);
        unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");