/*
 * Linux NET3: GRE over IP protocol decoder.
 *
 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
 * Problems & solutions:
 *
 * 1. The most important issue is detecting local dead loops.
 * They would cause complete host lockup in transmit, which
 * would be "resolved" by stack overflow or, if queueing is enabled,
 * by infinite looping in net_bh.
 *
 * We cannot track such dead loops during route installation;
 * it is an infeasible task. The most general solution would be
 * to keep an skb->encapsulation counter (a sort of local TTL)
 * and silently drop the packet when it expires. It is a good
 * solution, but it requires maintaining a new variable in ALL
 * skbs, even when no tunneling is used.
 *
 * Current solution: xmit_recursion breaks dead loops. This is a percpu
 * counter, since when we enter the first ndo_xmit(), cpu migration is
 * forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
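 *
 * A minimal sketch of that pattern (illustrative only; the name
 * xmit_recursion_sketch is hypothetical, and the real counter lives
 * in the core transmit path, not in this file):
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion_sketch);
 *
 *	if (__this_cpu_read(xmit_recursion_sketch) > RECURSION_LIMIT) {
 *		kfree_skb(skb);		// break the dead loop
 *		return NETDEV_TX_OK;
 *	}
 *	__this_cpu_inc(xmit_recursion_sketch);
 *	// ... transmit; this may re-enter a tunnel's ndo_start_xmit() ...
 *	__this_cpu_dec(xmit_recursion_sketch);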
 *
 * 2. Networking dead loops would not kill routers, but would really
 * kill the network. The IP hop limit plays the role of "t->recursion"
 * in this case, if we copy it from the packet being encapsulated to
 * the outer header. It is a very good solution, but it introduces two
 * problems:
 *
 * - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
 *   do not work over tunnels.
 * - traceroute does not work. I planned to relay ICMP from the tunnel,
 *   so that this problem would be solved and the traceroute output
 *   would be even more informative. This idea turned out to be wrong:
 *   only Linux complies with rfc1812 now (yes, guys, Linux is the only
 *   true router now :-)); all other routers (at least, those in my
 *   neighbourhood) return only 8 bytes of payload. That is the end of it.
 *
 * Hence, if we want OSPF to work or traceroute to say something
 * reasonable, we should search for another solution.
 *
 * One of them is to parse the packet, trying to detect inner
 * encapsulation made by our node. It is difficult or even impossible,
 * especially taking fragmentation into account. In short, TTL is not
 * a solution at all.
 *
 * Current solution: the solution was UNEXPECTEDLY SIMPLE.
 * We force the DF flag on tunnels with a preconfigured hop limit;
 * that is ALL. :-) Well, it does not remove the problem completely,
 * but exponential growth of network traffic is changed to linear
 * (branches that exceed the pmtu are pruned) and the tunnel mtu
 * rapidly degrades to a value < 68, where looping stops.
 * Yes, it is not good if there is a router in the loop which does
 * not force DF even when the encapsulated packets have DF set.
 * But it is not our problem! Nobody could accuse us; we did all
 * that we could. Even if it was your gated that injected the fatal
 * route into the network, even if it was you who configured the
 * fatal static route: you are innocent. :-)
 */
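
/* In userspace terms, a tunnel created with a fixed TTL gets path MTU
 * discovery (and hence the forced DF bit) by default; an illustrative
 * invocation, using documentation addresses:
 *
 *	ip tunnel add t0 mode gre remote 192.0.2.1 local 192.0.2.2 ttl 64
 */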
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
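/* Being mode 0644, the parameter is also writable at runtime through
 * sysfs, e.g. (illustrative):
 *
 *	echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 */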
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				__be32 id, u32 index, bool truncate);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key into the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksum enabled. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee, so
	 * why the hell do these idiots break the standards they
	 * established themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			 * rfc2003 contains "deep thoughts" about NET_UNREACH;
			 * I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1: length in 32-bit words */
		break;
	}
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);
	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment at the top of ipgre_err() above: routers
	 * return only 8 bytes of payload and Cisco put the GRE key in
	 * the third word, so precise ICMP relaying is infeasible.
	 */
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
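
/* ERSPAN receive path: validate and pull the outer headers, derive the
 * tunnel key from the 10-bit session ID, and hand the mirrored frame to
 * ip_tunnel_rcv(); in collect_md mode the ERSPAN index is passed along
 * as tunnel metadata instead of being matched against a device.
 */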
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	struct erspanhdr *ershdr;
	const struct iphdr *iph;
	__be32 index;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);

	/* The original GRE header does not have a key field, so reuse
	 * the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
	index = ershdr->md.index;
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);
	if (tunnel) {
		if (__iptunnel_pull_header(skb,
					   gre_hdr_len + sizeof(*ershdr),
					   htons(ETH_P_TEB),
					   false, false) < 0)
			return PACKET_REJECT;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			if (!md) {
				dst_release((struct dst_entry *)tun_dst);
				return PACKET_REJECT;
			}

			md->index = index;
			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		} else {
			tunnel->index = ntohl(index);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto drop;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;
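	/* Nobody claimed this GRE packet: tell the sender the protocol
	 * port is unreachable, as is done for an unbound UDP port.
	 */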
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags = tunnel->parms.o_flags;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
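
/* Prepare the skb for GRE GSO/checksum offload. A checksum start that
 * already lies before skb->data cannot be expressed once the GRE
 * header is pushed, so checksummed offload is refused in that case.
 */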
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	if (csum && skb_checksum_start(skb) < skb->data)
		return -EINVAL;

	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
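
/* Flow-based (collect_md) transmit setup: resolve the route, reusing
 * the per-flow dst cache when it is usable, then make sure the skb has
 * enough headroom for the outer IP and tunnel headers.
 */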
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
		       + tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;

	/* ERSPAN has a fixed 8-byte GRE header */
	tunnel_hlen = 8 + sizeof(struct erspanhdr);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	if (tun_info->options_len < sizeof(*md))
		goto err_free_rt;

	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
			    ntohl(md->index), truncate);

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	ip_rt_put(rt);

	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
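
/* Map the IP TOS byte to a 3-bit CoS for the ERSPAN header: shifting
 * away the two ECN bits yields the DSCP, whose top three (class
 * selector) bits become the CoS.
 */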
static inline u8 tos_to_cos(u8 tos)
{
	u8 dscp, cos;

	dscp = tos >> 2;
	cos = dscp >> 3;
	return cos;
}
static void erspan_build_header(struct sk_buff *skb,
				__be32 id, u32 index, bool truncate)
{
	struct iphdr *iphdr = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	enum erspan_encap_type enc_type;
	struct erspanhdr *ershdr;
	struct qtag_prefix {
		__be16 eth_type;
		__be16 tci;
	} *qp;
	u16 vlan_tci = 0;

	enc_type = ERSPAN_ENCAP_NOVLAN;

	/* If the mirrored packet has a VLAN tag, extract the TCI and
	 * preserve the VLAN header in the mirrored frame.
	 */
	if (eth->h_proto == htons(ETH_P_8021Q)) {
		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
		vlan_tci = ntohs(qp->tci);
		enc_type = ERSPAN_ENCAP_INFRAME;
	}

	skb_push(skb, sizeof(*ershdr));
	ershdr = (struct erspanhdr *)skb->data;
	memset(ershdr, 0, sizeof(*ershdr));

	ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
				 (ERSPAN_VERSION << VER_OFFSET));
	ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) |
			   ((tos_to_cos(iphdr->tos) << COS_OFFSET) & COS_MASK) |
			   (enc_type << EN_OFFSET & EN_MASK) |
			   ((truncate << T_OFFSET) & T_MASK));
	ershdr->md.index = htonl(index & INDEX_MASK);
}
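
/* Editorial aid, not authoritative: per the type II ERSPAN draft, the
 * header packed above has the bit layout
 *
 *	word 0: Ver (4) | VLAN (12) | COS (3) | En (2) | T (1) | Session ID (10)
 *	word 1: Reserved (12) | Index (20)
 */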
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	erspan_build_header(skb, tunnel->parms.o_key, tunnel->index, truncate);
	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
 * It allows one to construct a virtual multiprotocol broadcast "LAN"
 * over the Internet, provided multicast routing is tuned.
 *
 * I have no idea whether this bicycle was invented before me,
 * so I had to set ARPHRD_IPGRE to a random value.
 * I have the impression that Cisco could have made something similar,
 * but this feature is apparently missing in IOS <= 11.2(8).
 *
 * I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 * with broadcast 224.66.66.66. If you have access to the mbone, play
 * with me :-)
 *
 *	ping -t 255 224.66.66.66
 *
 * If nobody answers, the mbone does not work.
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ifconfig Universe add fe80::<Your_real_addr>/10
 *	ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 *	ftp fec0:6666:6666::193.233.7.65
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide. Since we reuse
	 * the 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_INDEX]) {
		t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);

		if (t->index & ~INDEX_MASK)
			return -EINVAL;
	}

	return 0;
}
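
/* An illustrative iproute2 invocation exercising the attributes parsed
 * above (documentation addresses; not taken from this file):
 *
 *	ip link add dev ers1 type erspan seq key 100 \
 *		local 192.0.2.2 remote 192.0.2.1 erspan 123
 */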
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       sizeof(struct erspanhdr);
	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (t->index)
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
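
/* A hedged usage sketch (hypothetical caller; error handling trimmed;
 * runs under RTNL, as the openvswitch caller does):
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gretap-fb", NET_NAME_USER);
 *	rtnl_unlock();
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	// dev is now a collect_md gretap device with an IP_MAX_MTU MTU
 */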
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, erspan_net_id);

	ip_tunnel_delete_net(itn, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit = erspan_exit_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");