GNU Linux-libre 4.19.211-gnu1
[releases.git] / net / ipv4 / ip_gre.c
1 /*
2  *      Linux NET3:     GRE over IP protocol decoder.
3  *
4  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_vlan.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ip_tunnels.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50 #include <net/dst_metadata.h>
51 #include <net/erspan.h>
52
53 /*
54    Problems & solutions
55    --------------------
56
57    1. The most important issue is detecting local dead loops.
58    They would cause complete host lockup in transmit, which
59    would be "resolved" by stack overflow or, if queueing is enabled,
60    with infinite looping in net_bh.
61
62    We cannot track such dead loops during route installation,
63    it is infeasible task. The most general solutions would be
64    to keep skb->encapsulation counter (sort of local ttl),
65    and silently drop packet when it expires. It is a good
66    solution, but it supposes maintaining new variable in ALL
67    skb, even if no tunneling is used.
68
69    Current solution: xmit_recursion breaks dead loops. This is a percpu
70    counter, since when we enter the first ndo_xmit(), cpu migration is
71    forbidden. We force an exit if this counter reaches RECURSION_LIMIT
72
73    2. Networking dead loops would not kill routers, but would really
74    kill network. IP hop limit plays role of "t->recursion" in this case,
75    if we copy it from packet being encapsulated to upper header.
76    It is very good solution, but it introduces two problems:
77
78    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
79      do not work over tunnels.
80    - traceroute does not work. I planned to relay ICMP from tunnel,
81      so that this problem would be solved and traceroute output
82      would even more informative. This idea appeared to be wrong:
83      only Linux complies to rfc1812 now (yes, guys, Linux is the only
84      true router now :-)), all routers (at least, in neighbourhood of mine)
85      return only 8 bytes of payload. It is the end.
86
87    Hence, if we want that OSPF worked or traceroute said something reasonable,
88    we should search for another solution.
89
90    One of them is to parse packet trying to detect inner encapsulation
91    made by our node. It is difficult or even impossible, especially,
92    taking into account fragmentation. To be short, ttl is not a solution at all.
93
94    Current solution: The solution was UNEXPECTEDLY SIMPLE.
95    We force DF flag on tunnels with preconfigured hop limit,
96    that is ALL. :-) Well, it does not remove the problem completely,
97    but exponential growth of network traffic is changed to linear
98    (branches, that exceed pmtu are pruned) and tunnel mtu
99    rapidly degrades to value <68, where looping stops.
100    Yes, it is not good if there exists a router in the loop,
101    which does not force DF, even when encapsulating packets have DF set.
102    But it is not our problem! Nobody could accuse us, we made
103    all that we could make. Even if it is your gated who injected
104    fatal route to network, even if it were you who configured
105    fatal static route: you are innocent. :-)
106
107    Alexey Kuznetsov.
108  */
109
/* Module parameter and per-tunnel-class state.
 *
 * log_ecn_error: when set, packets arriving with a corrupted ECN
 * combination are logged (value is consumed by ip_tunnel_rcv() calls
 * throughout this file).
 */
110 static bool log_ecn_error = true;
111 module_param(log_ecn_error, bool, 0644);
112 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
113
/* Defined later in this file; declared here for earlier users. */
114 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
115 static int ipgre_tunnel_init(struct net_device *dev);
116 static void erspan_build_header(struct sk_buff *skb,
117                                 u32 id, u32 index,
118                                 bool truncate, bool is_ipv4);
119
/* net_generic() keys: one ip_tunnel_net table per tunnel flavour
 * (plain gre, gretap, erspan) in every network namespace.
 */
120 static unsigned int ipgre_net_id __read_mostly;
121 static unsigned int gre_tap_net_id __read_mostly;
122 static unsigned int erspan_net_id __read_mostly;
123
/* ipgre_err() - account an ICMP error against the GRE tunnel it hit.
 *
 * @skb:  the received ICMP message (icmp_hdr() is valid).
 * @info: ICMP info word (currently unused here).
 * @tpi:  GRE header of the embedded packet, parsed by gre_err().
 *
 * Only updates the matching tunnel's err_count/err_time rate-limit
 * state (consumed by the transmit path); the error is not relayed —
 * see the historical comment below for why relaying is infeasible.
 */
124 static void ipgre_err(struct sk_buff *skb, u32 info,
125                       const struct tnl_ptk_info *tpi)
126 {
127
128         /* All the routers (except for Linux) return only
129            8 bytes of packet payload. It means, that precise relaying of
130            ICMP in the real Internet is absolutely infeasible.
131
132            Moreover, Cisco "wise men" put GRE key to the third word
133            in GRE header. It makes impossible maintaining even soft
134            state for keyed GRE tunnels with enabled checksum. Tell
135            them "thank you".
136
137            Well, I wonder, rfc1812 was written by Cisco employee,
138            what the hell these idiots break standards established
139            by themselves???
140            */
141         struct net *net = dev_net(skb->dev);
142         struct ip_tunnel_net *itn;
143         const struct iphdr *iph;
144         const int type = icmp_hdr(skb)->type;
145         const int code = icmp_hdr(skb)->code;
146         unsigned int data_len = 0;
147         struct ip_tunnel *t;
148
        /* Classify the ICMP type/code: bail out early for anything we
         * cannot or should not act on.
         */
149         switch (type) {
150         default:
151         case ICMP_PARAMETERPROB:
152                 return;
153
154         case ICMP_DEST_UNREACH:
155                 switch (code) {
156                 case ICMP_SR_FAILED:
157                 case ICMP_PORT_UNREACH:
158                         /* Impossible event. */
159                         return;
160                 default:
161                         /* All others are translated to HOST_UNREACH.
162                            rfc2003 contains "deep thoughts" about NET_UNREACH,
163                            I believe they are just ether pollution. --ANK
164                          */
165                         break;
166                 }
167                 break;
168
169         case ICMP_TIME_EXCEEDED:
170                 if (code != ICMP_EXC_TTL)
171                         return;
        /* Length of the embedded original datagram, per RFC 4884. */
172                 data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
173                 break;
174
175         case ICMP_REDIRECT:
176                 break;
177         }
178
        /* Pick the tunnel table matching the inner protocol of the
         * offending packet.
         */
179         if (tpi->proto == htons(ETH_P_TEB))
180                 itn = net_generic(net, gre_tap_net_id)
181         else if (tpi->proto == htons(ETH_P_ERSPAN) ||
182                  tpi->proto == htons(ETH_P_ERSPAN2))
183                 itn = net_generic(net, erspan_net_id);
184         else
185                 itn = net_generic(net, ipgre_net_id);
186
        /* The ICMP payload carries the header of the packet *we* sent,
         * so its daddr is the tunnel's remote address and saddr the
         * local one — hence the swapped argument order in the lookup.
         */
187         iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
188         t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
189                              iph->daddr, iph->saddr, tpi->key);
190
191         if (!t)
192                 return;
193
        /* For IPv6-over-GRE, try to synthesize an ICMPv6 unreachable
         * toward the inner sender; on success there is nothing more
         * to record here.
         */
194 #if IS_ENABLED(CONFIG_IPV6)
195        if (tpi->proto == htons(ETH_P_IPV6) &&
196            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
197                                        type, data_len))
198                return;
199 #endif
200
        /* Errors on unconfigured or multicast destinations cannot be
         * attributed to a single peer — ignore them.
         */
201         if (t->parms.iph.daddr == 0 ||
202             ipv4_is_multicast(t->parms.iph.daddr))
203                 return;
204
205         if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
206                 return;
207
        /* Rate-limit bookkeeping: count errors that arrive within
         * IPTUNNEL_ERR_TIMEO of the previous one.
         */
208         if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
209                 t->err_count++;
210         else
211                 t->err_count = 1;
212         t->err_time = jiffies;
213 }
214
/* gre_err() - ICMP error handler registered for IPPROTO_GRE.
 *
 * On entry skb->data points at the embedded IP header of the packet
 * that triggered the error (hence the direct cast below).  PMTU and
 * redirect messages are serviced directly; everything else is handed
 * to ipgre_err() for per-tunnel accounting.
 */
215 static void gre_err(struct sk_buff *skb, u32 info)
216 {
217         /* All the routers (except for Linux) return only
218          * 8 bytes of packet payload. It means, that precise relaying of
219          * ICMP in the real Internet is absolutely infeasible.
220          *
221          * Moreover, Cisco "wise men" put GRE key to the third word
222          * in GRE header. It makes impossible maintaining even soft
223          * state for keyed
224          * GRE tunnels with enabled checksum. Tell them "thank you".
225          *
226          * Well, I wonder, rfc1812 was written by Cisco employee,
227          * what the hell these idiots break standards established
228          * by themselves???
229          */
230
231         const struct iphdr *iph = (struct iphdr *)skb->data;
232         const int type = icmp_hdr(skb)->type;
233         const int code = icmp_hdr(skb)->code;
234         struct tnl_ptk_info tpi;
235
        /* Re-parse the GRE header of the offending packet; a malformed
         * or unsupported header means the error cannot be attributed.
         */
236         if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
237                              iph->ihl * 4) < 0)
238                 return;
239
240         if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
241                 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
242                                  skb->dev->ifindex, 0, IPPROTO_GRE, 0);
243                 return;
244         }
245         if (type == ICMP_REDIRECT) {
246                 ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
247                               IPPROTO_GRE, 0);
248                 return;
249         }
250
251         ipgre_err(skb, info, &tpi);
252 }
253
254 static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
255                       int gre_hdr_len)
256 {
257         struct net *net = dev_net(skb->dev);
258         struct metadata_dst *tun_dst = NULL;
259         struct erspan_base_hdr *ershdr;
260         struct ip_tunnel_net *itn;
261         struct ip_tunnel *tunnel;
262         const struct iphdr *iph;
263         struct erspan_md2 *md2;
264         int ver;
265         int len;
266
267         itn = net_generic(net, erspan_net_id);
268
269         iph = ip_hdr(skb);
270         ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
271         ver = ershdr->ver;
272
273         tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
274                                   tpi->flags | TUNNEL_KEY,
275                                   iph->saddr, iph->daddr, tpi->key);
276
277         if (tunnel) {
278                 len = gre_hdr_len + erspan_hdr_len(ver);
279                 if (unlikely(!pskb_may_pull(skb, len)))
280                         return PACKET_REJECT;
281
282                 if (__iptunnel_pull_header(skb,
283                                            len,
284                                            htons(ETH_P_TEB),
285                                            false, false) < 0)
286                         goto drop;
287
288                 if (tunnel->collect_md) {
289                         struct erspan_metadata *pkt_md, *md;
290                         struct ip_tunnel_info *info;
291                         unsigned char *gh;
292                         __be64 tun_id;
293                         __be16 flags;
294
295                         tpi->flags |= TUNNEL_KEY;
296                         flags = tpi->flags;
297                         tun_id = key32_to_tunnel_id(tpi->key);
298
299                         tun_dst = ip_tun_rx_dst(skb, flags,
300                                                 tun_id, sizeof(*md));
301                         if (!tun_dst)
302                                 return PACKET_REJECT;
303
304                         /* skb can be uncloned in __iptunnel_pull_header, so
305                          * old pkt_md is no longer valid and we need to reset
306                          * it
307                          */
308                         gh = skb_network_header(skb) +
309                              skb_network_header_len(skb);
310                         pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
311                                                             sizeof(*ershdr));
312                         md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
313                         md->version = ver;
314                         md2 = &md->u.md2;
315                         memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
316                                                        ERSPAN_V2_MDSIZE);
317
318                         info = &tun_dst->u.tun_info;
319                         info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
320                         info->options_len = sizeof(*md);
321                 }
322
323                 skb_reset_mac_header(skb);
324                 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
325                 return PACKET_RCVD;
326         }
327         return PACKET_REJECT;
328
329 drop:
330         kfree_skb(skb);
331         return PACKET_RCVD;
332 }
333
/* __ipgre_rcv() - try to deliver a GRE packet to a tunnel in @itn.
 *
 * Returns PACKET_RCVD when the skb was consumed (delivered or freed),
 * PACKET_NEXT when no tunnel in @itn matched (so the caller may retry
 * against another table) and PACKET_REJECT on resource failure.
 * @raw_proto is forwarded to __iptunnel_pull_header().
 */
334 static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
335                        struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
336 {
337         struct metadata_dst *tun_dst = NULL;
338         const struct iphdr *iph;
339         struct ip_tunnel *tunnel;
340
341         iph = ip_hdr(skb);
342         tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
343                                   iph->saddr, iph->daddr, tpi->key);
344
345         if (tunnel) {
346                 if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
347                                            raw_proto, false) < 0)
348                         goto drop;
349
                /* ARPHRD_NONE devices carry no link-layer header worth
                 * popping; just mark where the (empty) mac header is.
                 */
350                 if (tunnel->dev->type != ARPHRD_NONE)
351                         skb_pop_mac_header(skb);
352                 else
353                         skb_reset_mac_header(skb);
354                 if (tunnel->collect_md) {
355                         __be16 flags;
356                         __be64 tun_id;
357
                        /* Expose only checksum/key state plus the tunnel
                         * id in the rx metadata handed upstream.
                         */
358                         flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
359                         tun_id = key32_to_tunnel_id(tpi->key);
360                         tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
361                         if (!tun_dst)
362                                 return PACKET_REJECT;
363                 }
364
365                 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
366                 return PACKET_RCVD;
367         }
368         return PACKET_NEXT;
369
370 drop:
371         kfree_skb(skb);
372         return PACKET_RCVD;
373 }
374
375 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
376                      int hdr_len)
377 {
378         struct net *net = dev_net(skb->dev);
379         struct ip_tunnel_net *itn;
380         int res;
381
382         if (tpi->proto == htons(ETH_P_TEB))
383                 itn = net_generic(net, gre_tap_net_id);
384         else
385                 itn = net_generic(net, ipgre_net_id);
386
387         res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
388         if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
389                 /* ipgre tunnels in collect metadata mode should receive
390                  * also ETH_P_TEB traffic.
391                  */
392                 itn = net_generic(net, ipgre_net_id);
393                 res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
394         }
395         return res;
396 }
397
/* gre_rcv() - top-level receive handler for IPPROTO_GRE packets.
 *
 * Parses the GRE header, then dispatches to the ERSPAN or plain
 * GRE/gretap receive paths.  Always returns 0: the skb is either
 * consumed by a tunnel or freed here (after sending an ICMP
 * port-unreachable when no tunnel claimed it).
 */
398 static int gre_rcv(struct sk_buff *skb)
399 {
400         struct tnl_ptk_info tpi;
401         bool csum_err = false;
402         int hdr_len;
403
404 #ifdef CONFIG_NET_IPGRE_BROADCAST
405         if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
406                 /* Looped back packet, drop it! */
407                 if (rt_is_output_route(skb_rtable(skb)))
408                         goto drop;
409         }
410 #endif
411
        /* hdr_len is the flags-dependent total GRE header length. */
412         hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
413         if (hdr_len < 0)
414                 goto drop;
415
416         if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
417                      tpi.proto == htons(ETH_P_ERSPAN2))) {
418                 if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
419                         return 0;
420                 goto out;
421         }
422
423         if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
424                 return 0;
425
426 out:
        /* No tunnel wanted this packet: tell the sender, then drop. */
427         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
428 drop:
429         kfree_skb(skb);
430         return 0;
431 }
432
433 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
434                        const struct iphdr *tnl_params,
435                        __be16 proto)
436 {
437         struct ip_tunnel *tunnel = netdev_priv(dev);
438
439         if (tunnel->parms.o_flags & TUNNEL_SEQ)
440                 tunnel->o_seqno++;
441
442         /* Push GRE header. */
443         gre_build_header(skb, tunnel->tun_hlen,
444                          tunnel->parms.o_flags, proto, tunnel->parms.o_key,
445                          htonl(tunnel->o_seqno));
446
447         ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
448 }
449
450 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
451 {
452         return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
453 }
454
455 static struct rtable *gre_get_rt(struct sk_buff *skb,
456                                  struct net_device *dev,
457                                  struct flowi4 *fl,
458                                  const struct ip_tunnel_key *key)
459 {
460         struct net *net = dev_net(dev);
461
462         memset(fl, 0, sizeof(*fl));
463         fl->daddr = key->u.ipv4.dst;
464         fl->saddr = key->u.ipv4.src;
465         fl->flowi4_tos = RT_TOS(key->tos);
466         fl->flowi4_mark = skb->mark;
467         fl->flowi4_proto = IPPROTO_GRE;
468
469         return ip_route_output_key(net, fl);
470 }
471
/* prepare_fb_xmit() - shared route lookup + headroom reservation for
 * the flow-based (collect-md) transmit paths.
 *
 * Fills @fl and returns the output route on success.  On any failure
 * the skb is freed, tx_dropped is bumped and NULL is returned, so a
 * caller seeing NULL must not touch the skb and should just return.
 */
472 static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
473                                       struct net_device *dev,
474                                       struct flowi4 *fl,
475                                       int tunnel_hlen)
476 {
477         struct ip_tunnel_info *tun_info;
478         const struct ip_tunnel_key *key;
479         struct rtable *rt = NULL;
480         int min_headroom;
481         bool use_cache;
482         int err;
483
484         tun_info = skb_tunnel_info(skb);
485         key = &tun_info->key;
486         use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
487
        /* Try the per-tunnel dst cache first; fall back to a fresh
         * route lookup and repopulate the cache on success.
         */
488         if (use_cache)
489                 rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
490         if (!rt) {
491                 rt = gre_get_rt(skb, dev, fl, key);
492                 if (IS_ERR(rt))
493                         goto err_free_skb;
494                 if (use_cache)
495                         dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
496                                           fl->saddr);
497         }
498
        /* Reserve room for link-layer + outer IP + GRE headers; expand
         * the head (with 16 bytes of slack) when the skb is too shallow
         * or its header area is shared with a clone.
         */
499         min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
500                         + tunnel_hlen + sizeof(struct iphdr);
501         if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
502                 int head_delta = SKB_DATA_ALIGN(min_headroom -
503                                                 skb_headroom(skb) +
504                                                 16);
505                 err = pskb_expand_head(skb, max_t(int, head_delta, 0),
506                                        0, GFP_ATOMIC);
507                 if (unlikely(err))
508                         goto err_free_rt;
509         }
510         return rt;
511
512 err_free_rt:
513         ip_rt_put(rt);
514 err_free_skb:
515         kfree_skb(skb);
516         dev->stats.tx_dropped++;
517         return NULL;
518 }
519
/* gre_fb_xmit() - transmit on a flow-based (collect-md) GRE tunnel.
 * All tunnel parameters (addresses, key, flags, tos/ttl) come from
 * the skb's tunnel metadata rather than the device configuration.
 * Consumes the skb on both success and failure.
 */
520 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
521                         __be16 proto)
522 {
523         struct ip_tunnel *tunnel = netdev_priv(dev);
524         struct ip_tunnel_info *tun_info;
525         const struct ip_tunnel_key *key;
526         struct rtable *rt = NULL;
527         struct flowi4 fl;
528         int tunnel_hlen;
529         __be16 df, flags;
530
        /* Only IPv4 transmit metadata can be handled here. */
531         tun_info = skb_tunnel_info(skb);
532         if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
533                      ip_tunnel_info_af(tun_info) != AF_INET))
534                 goto err_free_skb;
535
536         key = &tun_info->key;
537         tunnel_hlen = gre_calc_hlen(key->tun_flags);
538
        /* prepare_fb_xmit() frees the skb itself on failure. */
539         rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
540         if (!rt)
541                 return;
542
543         /* Push Tunnel header. */
544         if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
545                 goto err_free_rt;
546
        /* Only these three flags are meaningful in the GRE header. */
547         flags = tun_info->key.tun_flags &
548                 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
549         gre_build_header(skb, tunnel_hlen, flags, proto,
550                          tunnel_id_to_key32(tun_info->key.tun_id),
551                          (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
552
553         df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
554
555         iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
556                       key->tos, key->ttl, df, false);
557         return;
558
559 err_free_rt:
560         ip_rt_put(rt);
561 err_free_skb:
562         kfree_skb(skb);
563         dev->stats.tx_dropped++;
564 }
565
566 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
567 {
568         struct ip_tunnel *tunnel = netdev_priv(dev);
569         struct ip_tunnel_info *tun_info;
570         const struct ip_tunnel_key *key;
571         struct erspan_metadata *md;
572         struct rtable *rt = NULL;
573         bool truncate = false;
574         __be16 df, proto;
575         struct flowi4 fl;
576         int tunnel_hlen;
577         int version;
578         int nhoff;
579         int thoff;
580
581         tun_info = skb_tunnel_info(skb);
582         if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
583                      ip_tunnel_info_af(tun_info) != AF_INET))
584                 goto err_free_skb;
585
586         key = &tun_info->key;
587         if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
588                 goto err_free_rt;
589         if (tun_info->options_len < sizeof(*md))
590                 goto err_free_rt;
591         md = ip_tunnel_info_opts(tun_info);
592
593         /* ERSPAN has fixed 8 byte GRE header */
594         version = md->version;
595         tunnel_hlen = 8 + erspan_hdr_len(version);
596
597         rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
598         if (!rt)
599                 return;
600
601         if (gre_handle_offloads(skb, false))
602                 goto err_free_rt;
603
604         if (skb->len > dev->mtu + dev->hard_header_len) {
605                 pskb_trim(skb, dev->mtu + dev->hard_header_len);
606                 truncate = true;
607         }
608
609         nhoff = skb_network_header(skb) - skb_mac_header(skb);
610         if (skb->protocol == htons(ETH_P_IP) &&
611             (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
612                 truncate = true;
613
614         thoff = skb_transport_header(skb) - skb_mac_header(skb);
615         if (skb->protocol == htons(ETH_P_IPV6) &&
616             (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
617                 truncate = true;
618
619         if (version == 1) {
620                 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
621                                     ntohl(md->u.index), truncate, true);
622                 proto = htons(ETH_P_ERSPAN);
623         } else if (version == 2) {
624                 erspan_build_header_v2(skb,
625                                        ntohl(tunnel_id_to_key32(key->tun_id)),
626                                        md->u.md2.dir,
627                                        get_hwid(&md->u.md2),
628                                        truncate, true);
629                 proto = htons(ETH_P_ERSPAN2);
630         } else {
631                 goto err_free_rt;
632         }
633
634         gre_build_header(skb, 8, TUNNEL_SEQ,
635                          proto, 0, htonl(tunnel->o_seqno++));
636
637         df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
638
639         iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
640                       key->tos, key->ttl, df, false);
641         return;
642
643 err_free_rt:
644         ip_rt_put(rt);
645 err_free_skb:
646         kfree_skb(skb);
647         dev->stats.tx_dropped++;
648 }
649
650 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
651 {
652         struct ip_tunnel_info *info = skb_tunnel_info(skb);
653         struct rtable *rt;
654         struct flowi4 fl4;
655
656         if (ip_tunnel_info_af(info) != AF_INET)
657                 return -EINVAL;
658
659         rt = gre_get_rt(skb, dev, &fl4, &info->key);
660         if (IS_ERR(rt))
661                 return PTR_ERR(rt);
662
663         ip_rt_put(rt);
664         info->key.u.ipv4.src = fl4.saddr;
665         return 0;
666 }
667
/* ipgre_xmit() - ndo_start_xmit for plain ipgre devices.
 * Chooses between flow-based transmit, a header prebuilt by
 * dev->header_ops, and the configured tunnel parameters.  Always
 * returns NETDEV_TX_OK; failed skbs are freed and counted as
 * tx_dropped.
 */
668 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
669                               struct net_device *dev)
670 {
671         struct ip_tunnel *tunnel = netdev_priv(dev);
672         const struct iphdr *tnl_params;
673
674         if (!pskb_inet_may_pull(skb))
675                 goto free_skb;
676
        /* Flow-based tunnels take parameters from skb metadata. */
677         if (tunnel->collect_md) {
678                 gre_fb_xmit(skb, dev, skb->protocol);
679                 return NETDEV_TX_OK;
680         }
681
682         if (dev->header_ops) {
        /* header_ops already placed outer IP + GRE in front of the
         * payload; use that copy as the outer header template.
         */
683                 const int pull_len = tunnel->hlen + sizeof(struct iphdr);
684
685                 if (skb_cow_head(skb, 0))
686                         goto free_skb;
687
688                 tnl_params = (const struct iphdr *)skb->data;
689
                /* The claimed tunnel header must lie entirely in front
                 * of the transport header; drop malformed frames.
                 */
690                 if (pull_len > skb_transport_offset(skb))
691                         goto free_skb;
692
693                 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
694                  * to gre header.
695                  */
696                 skb_pull(skb, pull_len);
697                 skb_reset_mac_header(skb);
698         } else {
699                 if (skb_cow_head(skb, dev->needed_headroom))
700                         goto free_skb;
701
702                 tnl_params = &tunnel->parms.iph;
703         }
704
705         if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
706                 goto free_skb;
707
708         __gre_xmit(skb, dev, tnl_params, skb->protocol);
709         return NETDEV_TX_OK;
710
711 free_skb:
712         kfree_skb(skb);
713         dev->stats.tx_dropped++;
714         return NETDEV_TX_OK;
715 }
716
/* erspan_xmit() - ndo_start_xmit for configured (non-metadata)
 * ERSPAN devices.  Builds the version-specific ERSPAN header, then
 * the GRE header, then hands off to the generic tunnel xmit path.
 * Always returns NETDEV_TX_OK; failed skbs are freed and counted.
 */
717 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
718                                struct net_device *dev)
719 {
720         struct ip_tunnel *tunnel = netdev_priv(dev);
721         bool truncate = false;
722         __be16 proto;
723
724         if (!pskb_inet_may_pull(skb))
725                 goto free_skb;
726
727         if (tunnel->collect_md) {
728                 erspan_fb_xmit(skb, dev);
729                 return NETDEV_TX_OK;
730         }
731
732         if (gre_handle_offloads(skb, false))
733                 goto free_skb;
734
735         if (skb_cow_head(skb, dev->needed_headroom))
736                 goto free_skb;
737
        /* Frames longer than the MTU are trimmed and flagged as
         * truncated in the ERSPAN header.
         */
738         if (skb->len > dev->mtu + dev->hard_header_len) {
739                 pskb_trim(skb, dev->mtu + dev->hard_header_len);
740                 truncate = true;
741         }
742
743         /* Push ERSPAN header */
744         if (tunnel->erspan_ver == 1) {
745                 erspan_build_header(skb, ntohl(tunnel->parms.o_key),
746                                     tunnel->index,
747                                     truncate, true);
748                 proto = htons(ETH_P_ERSPAN);
749         } else if (tunnel->erspan_ver == 2) {
750                 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
751                                        tunnel->dir, tunnel->hwid,
752                                        truncate, true);
753                 proto = htons(ETH_P_ERSPAN2);
754         } else {
        /* Unsupported ERSPAN version configured - drop. */
755                 goto free_skb;
756         }
757
        /* The session id already travels in the ERSPAN header; clear
         * TUNNEL_KEY so __gre_xmit() does not also emit a GRE key.
         */
758         tunnel->parms.o_flags &= ~TUNNEL_KEY;
759         __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
760         return NETDEV_TX_OK;
761
762 free_skb:
763         kfree_skb(skb);
764         dev->stats.tx_dropped++;
765         return NETDEV_TX_OK;
766 }
767
768 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
769                                 struct net_device *dev)
770 {
771         struct ip_tunnel *tunnel = netdev_priv(dev);
772
773         if (!pskb_inet_may_pull(skb))
774                 goto free_skb;
775
776         if (tunnel->collect_md) {
777                 gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
778                 return NETDEV_TX_OK;
779         }
780
781         if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
782                 goto free_skb;
783
784         if (skb_cow_head(skb, dev->needed_headroom))
785                 goto free_skb;
786
787         __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
788         return NETDEV_TX_OK;
789
790 free_skb:
791         kfree_skb(skb);
792         dev->stats.tx_dropped++;
793         return NETDEV_TX_OK;
794 }
795
/* ipgre_link_update() - recompute header lengths and offload features
 * after the tunnel's output flags changed (ioctl or netlink
 * changelink).  When @set_mtu, adjust dev->mtu by the same delta,
 * clamped to the 68-byte IPv4 minimum.
 */
796 static void ipgre_link_update(struct net_device *dev, bool set_mtu)
797 {
798         struct ip_tunnel *tunnel = netdev_priv(dev);
799         int len;
800
        /* After these three lines, len holds the *change* in GRE
         * header length implied by the new o_flags.
         */
801         len = tunnel->tun_hlen;
802         tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
803         len = tunnel->tun_hlen - len;
804         tunnel->hlen = tunnel->hlen + len;
805
806         if (dev->header_ops)
807                 dev->hard_header_len += len;
808         else
809                 dev->needed_headroom += len;
810
811         if (set_mtu)
812                 dev->mtu = max_t(int, dev->mtu - len, 68);
813
        /* Software GSO is only offered when sequence numbers are off,
         * and checksumming is either off or not combined with an
         * outer encapsulation; LLTX follows the same split.
         */
814         if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
815                 if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
816                     tunnel->encap.type == TUNNEL_ENCAP_NONE) {
817                         dev->features |= NETIF_F_GSO_SOFTWARE;
818                         dev->hw_features |= NETIF_F_GSO_SOFTWARE;
819                 } else {
820                         dev->features &= ~NETIF_F_GSO_SOFTWARE;
821                         dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
822                 }
823                 dev->features |= NETIF_F_LLTX;
824         } else {
825                 dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
826                 dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
827         }
828 }
829
/* ipgre_tunnel_ioctl() - legacy SIOC{GET,ADD,CHG,DEL}TUNNEL handler.
 * Translates between the userspace wire-format GRE_* flag encoding
 * and the internal TUNNEL_* representation around the generic
 * ip_tunnel_ioctl() helper.  Returns 0 or a negative errno.
 */
830 static int ipgre_tunnel_ioctl(struct net_device *dev,
831                               struct ifreq *ifr, int cmd)
832 {
833         struct ip_tunnel_parm p;
834         int err;
835
836         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
837                 return -EFAULT;
838
        /* Reject anything that is not plain, optionless GRE-in-IPv4,
         * as well as versioned or source-routed GRE headers.
         */
839         if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
840                 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
841                     p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
842                     ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
843                         return -EINVAL;
844         }
845
846         p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
847         p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
848
849         err = ip_tunnel_ioctl(dev, &p, cmd);
850         if (err)
851                 return err;
852
853         if (cmd == SIOCCHGTUNNEL) {
854                 struct ip_tunnel *t = netdev_priv(dev);
855
856                 t->parms.i_flags = p.i_flags;
857                 t->parms.o_flags = p.o_flags;
858
                /* erspan devices manage their header length separately;
                 * only non-erspan links get the recompute here.
                 */
859                 if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
860                         ipgre_link_update(dev, true);
861         }
862
        /* Convert back to the wire encoding before copying out. */
863         p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
864         p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
865
866         if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
867                 return -EFAULT;
868
869         return 0;
870 }
871
872 /* Nice toy. Unfortunately, useless in real life :-)
873    It allows to construct virtual multiprotocol broadcast "LAN"
874    over the Internet, provided multicast routing is tuned.
875
876
   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
879    I have an impression, that Cisco could make something similar,
880    but this feature is apparently missing in IOS<=11.2(8).
881
882    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
883    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
884
885    ping -t 255 224.66.66.66
886
887    If nobody answers, mbone does not work.
888
889    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
890    ip addr add 10.66.66.<somewhat>/24 dev Universe
891    ifconfig Universe up
892    ifconfig Universe add fe80::<Your_real_addr>/10
893    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
894    ftp 10.66.66.66
895    ...
896    ftp fec0:6666:6666::193.233.7.65
897    ...
898  */
899 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
900                         unsigned short type,
901                         const void *daddr, const void *saddr, unsigned int len)
902 {
903         struct ip_tunnel *t = netdev_priv(dev);
904         struct iphdr *iph;
905         struct gre_base_hdr *greh;
906
907         iph = skb_push(skb, t->hlen + sizeof(*iph));
908         greh = (struct gre_base_hdr *)(iph+1);
909         greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
910         greh->protocol = htons(type);
911
912         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
913
914         /* Set the source hardware address. */
915         if (saddr)
916                 memcpy(&iph->saddr, saddr, 4);
917         if (daddr)
918                 memcpy(&iph->daddr, daddr, 4);
919         if (iph->daddr)
920                 return t->hlen + sizeof(*iph);
921
922         return -(t->hlen + sizeof(*iph));
923 }
924
925 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
926 {
927         const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
928         memcpy(haddr, &iph->saddr, 4);
929         return 4;
930 }
931
/* header_ops for broadcast-mode GRE: link-layer addresses are the
 * outer IPv4 endpoints (see ipgre_header/ipgre_header_parse above).
 */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
936
937 #ifdef CONFIG_NET_IPGRE_BROADCAST
/* ndo_open for broadcast-capable GRE tunnels.  When the remote is a
 * multicast group, resolve the underlying device via a route lookup
 * and join the group on it; the ifindex is remembered in t->mlink so
 * ipgre_close() can leave the group again.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		/* Join on whatever device the route resolves to, not on
		 * the tunnel device itself.
		 */
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
963
964 static int ipgre_close(struct net_device *dev)
965 {
966         struct ip_tunnel *t = netdev_priv(dev);
967
968         if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
969                 struct in_device *in_dev;
970                 in_dev = inetdev_by_index(t->net, t->mlink);
971                 if (in_dev)
972                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
973         }
974         return 0;
975 }
976 #endif
977
/* netdev ops for layer-3 "gre" devices; open/stop are only needed for
 * the multicast (broadcast-mode) configuration.
 */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};
991
/* Offload features common to all GRE device flavours. */
#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)
996
/* rtnl ->setup for "gre" links: a layer-3 device of type ARPHRD_IPGRE. */
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
1003
/* Common init for gre and gretap devices: derive header lengths from
 * the configured output flags and set up offload features.
 */
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	/* Total tunnel overhead: GRE header plus any configured encap. */
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
1035
/* ndo_init for plain "gre" (layer-3) devices.  A fixed-destination
 * tunnel gets header_ops so the outer IP+GRE header can be prebuilt;
 * multicast destinations additionally require a local address and run
 * in broadcast mode.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	/* Device and broadcast "hardware" addresses are the tunnel's
	 * IPv4 endpoints.
	 */
	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			/* With header_ops the overhead is accounted in
			 * hard_header_len rather than needed_headroom.
			 */
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}
1069
/* Receive and ICMP-error hooks registered with the shared GRE demux. */
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
1074
/* Per-netns setup, delegated to the generic ip_tunnel core. */
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
1079
/* Batched per-netns teardown of all gre tunnels. */
static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}
1084
/* pernet registration for the "gre" flavour; per-netns state is a
 * struct ip_tunnel_net keyed by ipgre_net_id.
 */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
1091
1092 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1093                                  struct netlink_ext_ack *extack)
1094 {
1095         __be16 flags;
1096
1097         if (!data)
1098                 return 0;
1099
1100         flags = 0;
1101         if (data[IFLA_GRE_IFLAGS])
1102                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1103         if (data[IFLA_GRE_OFLAGS])
1104                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1105         if (flags & (GRE_VERSION|GRE_ROUTING))
1106                 return -EINVAL;
1107
1108         if (data[IFLA_GRE_COLLECT_METADATA] &&
1109             data[IFLA_GRE_ENCAP_TYPE] &&
1110             nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1111                 return -EINVAL;
1112
1113         return 0;
1114 }
1115
1116 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1117                               struct netlink_ext_ack *extack)
1118 {
1119         __be32 daddr;
1120
1121         if (tb[IFLA_ADDRESS]) {
1122                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1123                         return -EINVAL;
1124                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1125                         return -EADDRNOTAVAIL;
1126         }
1127
1128         if (!data)
1129                 goto out;
1130
1131         if (data[IFLA_GRE_REMOTE]) {
1132                 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1133                 if (!daddr)
1134                         return -EINVAL;
1135         }
1136
1137 out:
1138         return ipgre_tunnel_validate(tb, data, extack);
1139 }
1140
1141 static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
1142                            struct netlink_ext_ack *extack)
1143 {
1144         __be16 flags = 0;
1145         int ret;
1146
1147         if (!data)
1148                 return 0;
1149
1150         ret = ipgre_tap_validate(tb, data, extack);
1151         if (ret)
1152                 return ret;
1153
1154         /* ERSPAN should only have GRE sequence and key flag */
1155         if (data[IFLA_GRE_OFLAGS])
1156                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1157         if (data[IFLA_GRE_IFLAGS])
1158                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1159         if (!data[IFLA_GRE_COLLECT_METADATA] &&
1160             flags != (GRE_SEQ | GRE_KEY))
1161                 return -EINVAL;
1162
1163         /* ERSPAN Session ID only has 10-bit. Since we reuse
1164          * 32-bit key field as ID, check it's range.
1165          */
1166         if (data[IFLA_GRE_IKEY] &&
1167             (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1168                 return -EINVAL;
1169
1170         if (data[IFLA_GRE_OKEY] &&
1171             (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1172                 return -EINVAL;
1173
1174         return 0;
1175 }
1176
/* Translate RTM_NEWLINK attributes into ip_tunnel_parm for gre/gretap
 * devices.  Also updates per-device state (collect_md, ignore_df) on
 * the tunnel itself.  Returns 0, or -EINVAL on conflicting attributes.
 */
static int ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	/* Path MTU discovery is on by default; it sets DF on the outer
	 * header, which is incompatible with ignore_df (checked both
	 * here and below, whichever was configured first).
	 */
	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		/* Metadata-mode layer-3 gre has no meaningful hardware
		 * address type.
		 */
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		  && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}
1243
/* Parse erspan-specific attributes on top of the common GRE ones:
 * protocol version (1 or 2), the v1 session index, and the v2
 * direction and hardware ID.  Out-of-range values are rejected.
 */
static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		/* v1: optional session index, bounded by INDEX_MASK. */
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		/* v2: optional direction and hardware ID fields. */
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
1287
1288 /* This function returns true when ENCAP attributes are present in the nl msg */
1289 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1290                                       struct ip_tunnel_encap *ipencap)
1291 {
1292         bool ret = false;
1293
1294         memset(ipencap, 0, sizeof(*ipencap));
1295
1296         if (!data)
1297                 return ret;
1298
1299         if (data[IFLA_GRE_ENCAP_TYPE]) {
1300                 ret = true;
1301                 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1302         }
1303
1304         if (data[IFLA_GRE_ENCAP_FLAGS]) {
1305                 ret = true;
1306                 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1307         }
1308
1309         if (data[IFLA_GRE_ENCAP_SPORT]) {
1310                 ret = true;
1311                 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1312         }
1313
1314         if (data[IFLA_GRE_ENCAP_DPORT]) {
1315                 ret = true;
1316                 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1317         }
1318
1319         return ret;
1320 }
1321
/* ndo_init for gretap (Ethernet-over-GRE) devices. */
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
1330
/* netdev ops for gretap; also used by is_gretap_dev() to identify
 * gretap devices.
 */
static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
1342
/* ndo_init for erspan devices.  Unlike plain GRE, the GRE overhead is
 * a fixed 8 bytes here, with the version-dependent ERSPAN header
 * added on top of it and any configured encap.
 */
static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}
1359
/* netdev ops for erspan devices (Ethernet-like, erspan_xmit path). */
static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
1371
/* rtnl ->setup for "gretap" links: an Ethernet device carried over GRE. */
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* max_mtu = 0: presumably disables the core's upper MTU bound —
	 * gretap_fb_dev_create() below raises the MTU to IP_MAX_MTU.
	 */
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
1381
/* Return true when @dev is an IPv4 gretap device, identified by its
 * netdev_ops pointer.
 */
bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);
1387
1388 static int
1389 ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1390 {
1391         struct ip_tunnel_encap ipencap;
1392
1393         if (ipgre_netlink_encap_parms(data, &ipencap)) {
1394                 struct ip_tunnel *t = netdev_priv(dev);
1395                 int err = ip_tunnel_encap_setup(t, &ipencap);
1396
1397                 if (err < 0)
1398                         return err;
1399         }
1400
1401         return 0;
1402 }
1403
/* rtnl ->newlink for gre/gretap: configure optional encap first, then
 * parse the common attributes and register the tunnel.
 */
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
1421
/* rtnl ->newlink for erspan: like ipgre_newlink() but parsing the
 * erspan-specific attributes as well.
 */
static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
1439
/* RTM_NEWLINK on an existing gre/gretap device: re-parse parameters,
 * apply them, then recompute header lengths and features.  The MTU is
 * only auto-adjusted when userspace did not pass IFLA_MTU itself.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	/* Flags take effect only after the generic update succeeded. */
	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
1468
/* RTM_NEWLINK on an existing erspan device.  Unlike ipgre_changelink()
 * there is no ipgre_link_update() call: erspan's GRE header size is
 * fixed by erspan_tunnel_init() (the ioctl path skips it likewise).
 */
static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	/* Flags take effect only after the generic update succeeded. */
	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	return 0;
}
1495
/* Worst-case netlink attribute payload emitted by ipgre_fill_info();
 * shared by the gre, gretap and erspan link ops.
 */
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}
1543
/* Fill RTM_NEWLINK attributes describing this tunnel for userspace.
 * Shared by gre, gretap and erspan; the ERSPAN-specific attributes are
 * only emitted when erspan_ver marks the device as erspan.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
		/* Report TUNNEL_KEY in o_flags for non-metadata erspan
		 * devices, even when it is not set in parms.
		 */
		if (!t->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	/* Flags go out in the legacy GRE_* wire encoding. */
	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1607
/* rtnl ->setup for "erspan" links; the version defaults to ERSPAN v1
 * unless IFLA_GRE_ERSPAN_VER overrides it later.
 */
static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}
1620
/* Netlink attribute policy shared by the gre, gretap and erspan
 * link operations.
 */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
1644
/* rtnl link operations for layer-3 "gre" devices. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
1659
/* rtnl link operations for "gretap" (Ethernet-over-GRE) devices. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
1674
1675 static struct rtnl_link_ops erspan_link_ops __read_mostly = {
1676         .kind           = "erspan",
1677         .maxtype        = IFLA_GRE_MAX,
1678         .policy         = ipgre_policy,
1679         .priv_size      = sizeof(struct ip_tunnel),
1680         .setup          = erspan_setup,
1681         .validate       = erspan_validate,
1682         .newlink        = erspan_newlink,
1683         .changelink     = erspan_changelink,
1684         .dellink        = ip_tunnel_dellink,
1685         .get_size       = ipgre_get_size,
1686         .fill_info      = ipgre_fill_info,
1687         .get_link_net   = ip_tunnel_get_link_net,
1688 };
1689
/**
 * gretap_fb_dev_create - create a flow-based (collect_md) gretap device
 * @net: network namespace the device is created in
 * @name: requested interface name (may contain a "%d" template)
 * @name_assign_type: NET_NAME_* tag recording how the name was chosen
 *
 * Creates and registers a gretap device with metadata collection enabled,
 * bypassing the usual netlink request path (the openvswitch comment below
 * indicates OVS as the intended caller).  Returns the live device on
 * success or an ERR_PTR() on failure.
 */
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	/* No netlink attributes: ipgre_newlink() sees an all-default config. */
	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		/* Device was never registered here, so a plain
		 * free_netdev() is the correct cleanup.
		 */
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	/* Past ipgre_newlink() the device is live: tear it down through the
	 * tunnel dellink path and batch-unregister, not free_netdev().
	 */
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1734
/* Per-netns init: set up the gretap tunnel table and fallback "gretap0". */
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
1739
/* Per-netns batched teardown of all gretap tunnels in the dying namespaces. */
static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}
1744
1745 static struct pernet_operations ipgre_tap_net_ops = {
1746         .init = ipgre_tap_init_net,
1747         .exit_batch = ipgre_tap_exit_batch_net,
1748         .id   = &gre_tap_net_id,
1749         .size = sizeof(struct ip_tunnel_net),
1750 };
1751
/* Per-netns init: set up the erspan tunnel table and fallback "erspan0". */
static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}
1757
/* Per-netns batched teardown of all erspan tunnels in the dying namespaces. */
static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}
1762
1763 static struct pernet_operations erspan_net_ops = {
1764         .init = erspan_init_net,
1765         .exit_batch = erspan_exit_batch_net,
1766         .id   = &erspan_net_id,
1767         .size = sizeof(struct ip_tunnel_net),
1768 };
1769
/* Module init: register pernet state for all three device types first, then
 * the GRE protocol handler, then the rtnl link ops.  Each failure label
 * unwinds exactly the steps that succeeded before it, in reverse order.
 */
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	/* Hook GRE protocol 0 (base/Cisco GRE) into the IP stack; after this
	 * point incoming GRE packets can reach the tunnel receive paths.
	 */
	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

/* Error unwind: each label undoes the last successful step above it. */
erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
1822
/* Module exit: drop the rtnl link ops and the protocol handler before the
 * pernet state, so no new devices or packets can arrive while the per-netns
 * tunnel tables are being torn down.
 */
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}
1833
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Aliases so "ip link add ... type gre|gretap|erspan" and requests for the
 * fallback devices auto-load this module.
 */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");