1 // SPDX-License-Identifier: GPL-2.0
2 /* Bareudp: UDP tunnel encapsulation for different Payload types like
4 * Copyright (c) 2019 Nokia, Inc.
5 * Authors: Martin Varghese, <martin.varghese@nokia.com>
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/etherdevice.h>
13 #include <linux/hash.h>
14 #include <net/dst_metadata.h>
15 #include <net/gro_cells.h>
16 #include <net/rtnetlink.h>
17 #include <net/protocol.h>
18 #include <net/ip6_tunnel.h>
19 #include <net/ip_tunnels.h>
20 #include <net/udp_tunnel.h>
21 #include <net/bareudp.h>
/* Header-length helpers: a bareudp tunnel prepends only a bare UDP header;
 * the outer IPv4/IPv6 header is accounted for in the *_IPV4/*_IPV6 variants.
 */
23 #define BAREUDP_BASE_HLEN sizeof(struct udphdr)
24 #define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
25 sizeof(struct udphdr))
26 #define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
27 sizeof(struct udphdr))
/* Module parameter (0644: runtime-toggleable via sysfs) controlling
 * rate-limited logging of packets decapsulated with corrupted ECN bits. */
29 static bool log_ecn_error = true;
30 module_param(log_ecn_error, bool, 0644);
31 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
33 /* per-network namespace private data for this module */
35 static unsigned int bareudp_net_id;
/* NOTE(review): the enclosing "struct" opening lines are elided in this
 * chunk; bareudp_list is the per-netns list of bareudp devices. */
38 struct list_head bareudp_list;
/* NOTE(review): this field appears to belong to the netlink config struct
 * (struct bareudp_conf) whose declaration lines are elided here. */
45 bool multi_proto_mode;
48 /* Pseudo network device */
50 struct net *net; /* netns for packet i/o */
51 struct net_device *dev; /* netdev for bareudp tunnel */
55 bool multi_proto_mode; /* also accept the companion payload type */
56 struct socket __rcu *sock; /* listening UDP socket, RCU-published */
57 struct list_head next; /* bareudp node on namespace list */
58 struct gro_cells gro_cells; /* per-CPU rx aggregation */
/* UDP encapsulation receive handler, invoked by the UDP stack with the
 * outer UDP header still in place.  Determines the inner payload protocol,
 * strips the tunnel headers, attaches collect_md tunnel metadata, performs
 * ECN decapsulation and hands the packet to GRO cells.
 * NOTE(review): several lines (declarations, error paths, closing braces)
 * are elided from this chunk; the drop/free paths are not all visible.
 */
61 static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
63 struct metadata_dst *tun_dst = NULL;
64 struct bareudp_dev *bareudp;
65 unsigned short family;
/* sk_user_data was set to the bareudp device in bareudp_socket_create(). */
71 bareudp = rcu_dereference_sk_user_data(sk);
75 if (skb->protocol == htons(ETH_P_IP))
/* IP ethertype: in multi-proto mode peek at the inner IP version nibble
 * to tell IPv4 from IPv6 payloads; anything else is dropped. */
80 if (bareudp->ethertype == htons(ETH_P_IP)) {
83 if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
85 bareudp->dev->stats.rx_dropped++;
91 proto = htons(ETH_P_IP);
92 } else if (ipversion == 6 && bareudp->multi_proto_mode) {
93 proto = htons(ETH_P_IPV6);
95 bareudp->dev->stats.rx_dropped++;
/* MPLS ethertype: a multicast outer destination address implies a
 * multicast MPLS payload, accepted only in multi-proto mode. */
98 } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
99 struct iphdr *tunnel_hdr;
101 tunnel_hdr = (struct iphdr *)skb_network_header(skb);
102 if (tunnel_hdr->version == 4) {
103 if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
104 proto = bareudp->ethertype;
105 } else if (bareudp->multi_proto_mode &&
106 ipv4_is_multicast(tunnel_hdr->daddr)) {
107 proto = htons(ETH_P_MPLS_MC);
109 bareudp->dev->stats.rx_dropped++;
/* Same multicast test for an IPv6 outer header. */
114 struct ipv6hdr *tunnel_hdr_v6;
116 tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
118 ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
119 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
120 proto = bareudp->ethertype;
121 } else if (bareudp->multi_proto_mode &&
122 (addr_type & IPV6_ADDR_MULTICAST)) {
123 proto = htons(ETH_P_MPLS_MC);
125 bareudp->dev->stats.rx_dropped++;
/* Any other configured ethertype: payload type is fixed by config. */
130 proto = bareudp->ethertype;
/* Strip the outer headers; the final argument requests an skb scrub when
 * the packet crosses network namespaces. */
133 if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
135 !net_eq(bareudp->net,
136 dev_net(bareudp->dev)))) {
137 bareudp->dev->stats.rx_dropped++;
/* Build the metadata dst (outer addresses/ports) for collect_md users. */
140 tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
142 bareudp->dev->stats.rx_dropped++;
145 skb_dst_set(skb, &tun_dst->dst);
146 skb->dev = bareudp->dev;
/* Save a pointer to the outer IP header before resetting offsets; it is
 * still needed below for ECN decapsulation. */
147 oiph = skb_network_header(skb);
148 skb_reset_network_header(skb);
149 skb_reset_mac_header(skb);
/* Propagate ECN bits from the saved outer header into the inner header. */
151 if (!ipv6_mod_enabled() || family == AF_INET)
152 err = IP_ECN_decapsulate(oiph, skb);
154 err = IP6_ECN_decapsulate(oiph, skb);
/* Rate-limited diagnostics when the outer/inner ECN combination is bad. */
158 if (!ipv6_mod_enabled() || family == AF_INET)
159 net_info_ratelimited("non-ECT from %pI4 "
161 &((struct iphdr *)oiph)->saddr,
162 ((struct iphdr *)oiph)->tos);
164 net_info_ratelimited("non-ECT from %pI6\n",
165 &((struct ipv6hdr *)oiph)->saddr);
168 ++bareudp->dev->stats.rx_frame_errors;
169 ++bareudp->dev->stats.rx_errors;
/* Deliver via GRO cells; rx byte/packet stats only count successes. */
175 err = gro_cells_receive(&bareudp->gro_cells, skb);
176 if (likely(err == NET_RX_SUCCESS))
177 dev_sw_netstats_rx_add(bareudp->dev, len);
181 /* Consume bad packet */
/* encap_err_lookup callback for ICMP errors on the tunnel socket.
 * NOTE(review): the body is elided in this chunk. */
187 static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
/* ndo_init: allocate per-CPU sw netstats and initialize GRO cells.
 * Frees the stats again if gro_cells_init() fails. */
192 static int bareudp_init(struct net_device *dev)
194 struct bareudp_dev *bareudp = netdev_priv(dev);
197 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
201 err = gro_cells_init(&bareudp->gro_cells, dev);
203 free_percpu(dev->tstats);
/* ndo_uninit: release the resources allocated by bareudp_init(). */
209 static void bareudp_uninit(struct net_device *dev)
211 struct bareudp_dev *bareudp = netdev_priv(dev);
213 gro_cells_destroy(&bareudp->gro_cells);
214 free_percpu(dev->tstats);
/* Create the kernel UDP socket bound to @port.  Prefers an AF_INET6
 * socket (which also carries IPv4) when the IPv6 module is available. */
217 static struct socket *bareudp_create_sock(struct net *net, __be16 port)
219 struct udp_port_cfg udp_conf;
223 memset(&udp_conf, 0, sizeof(udp_conf));
225 if (ipv6_mod_enabled())
226 udp_conf.family = AF_INET6;
228 udp_conf.family = AF_INET;
230 udp_conf.local_udp_port = port;
231 /* Open UDP socket */
232 err = udp_sock_create(net, &udp_conf, &sock);
/* Accept locally-delivered GSO packets on this socket. */
236 udp_allow_gso(sock->sk);
240 /* Create new listen socket if needed */
241 static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
243 struct udp_tunnel_sock_cfg tunnel_cfg;
246 sock = bareudp_create_sock(bareudp->net, port);
248 return PTR_ERR(sock);
250 /* Mark socket as an encapsulation socket */
251 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
/* sk_user_data lets bareudp_udp_encap_recv() recover the device. */
252 tunnel_cfg.sk_user_data = bareudp;
253 tunnel_cfg.encap_type = 1;
254 tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
255 tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
256 tunnel_cfg.encap_destroy = NULL;
257 setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
/* Publish the socket for the RCU-protected rx/tx fast paths. */
259 rcu_assign_pointer(bareudp->sock, sock);
/* ndo_open: bring the tunnel up by creating its listening UDP socket. */
263 static int bareudp_open(struct net_device *dev)
265 struct bareudp_dev *bareudp = netdev_priv(dev);
268 ret = bareudp_socket_create(bareudp, bareudp->port);
/* Unpublish and release the tunnel socket.  The NULL assignment goes
 * through rcu_assign_pointer so readers see a consistent pointer; the
 * elided lines presumably include the RCU grace-period wait — confirm. */
272 static void bareudp_sock_release(struct bareudp_dev *bareudp)
276 sock = bareudp->sock;
277 rcu_assign_pointer(bareudp->sock, NULL);
279 udp_tunnel_sock_release(sock);
/* ndo_stop: tear down the listening socket created in bareudp_open(). */
282 static int bareudp_stop(struct net_device *dev)
284 struct bareudp_dev *bareudp = netdev_priv(dev);
286 bareudp_sock_release(bareudp);
/* IPv4 transmit path: route the outer flow, check PMTU, push the UDP
 * header and send via udp_tunnel_xmit_skb().
 * NOTE(review): error-handling lines are elided from this chunk. */
290 static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
291 struct bareudp_dev *bareudp,
292 const struct ip_tunnel_info *info)
294 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
295 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
296 struct socket *sock = rcu_dereference(bareudp->sock);
297 bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
298 const struct ip_tunnel_key *key = &info->key;
/* Hash the flow into a source port within [sport_min, USHRT_MAX]. */
309 sport = udp_flow_src_port(bareudp->net, skb,
310 bareudp->sport_min, USHRT_MAX,
/* Outer IPv4 route lookup, optionally using the per-flow dst cache. */
312 rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key,
313 sport, bareudp->port, key->tos,
315 (struct dst_cache *)&info->dst_cache : NULL);
320 skb_tunnel_check_pmtu(skb, &rt->dst,
321 BAREUDP_IPV4_HLEN + info->options_len, false);
/* Derive outer TOS/ECN from the key and the inner header. */
323 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
325 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
326 skb_scrub_packet(skb, xnet);
329 if (!skb_pull(skb, skb_network_offset(skb)))
/* Make sure there is headroom for link-layer + outer IP/UDP headers. */
332 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
333 BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
335 err = skb_cow_head(skb, min_headroom);
339 err = udp_tunnel_handle_offloads(skb, udp_sum);
343 skb_set_inner_protocol(skb, bareudp->ethertype);
344 udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
345 tos, ttl, df, sport, bareudp->port,
346 !net_eq(bareudp->net, dev_net(bareudp->dev)),
347 !(info->key.tun_flags & TUNNEL_CSUM));
351 dst_release(&rt->dst);
/* IPv6 transmit path: mirror of bareudp_xmit_skb() for an IPv6 outer
 * header, sending via udp_tunnel6_xmit_skb().
 * NOTE(review): error-handling lines are elided from this chunk. */
355 static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
356 struct bareudp_dev *bareudp,
357 const struct ip_tunnel_info *info)
359 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
360 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
361 struct socket *sock = rcu_dereference(bareudp->sock);
362 bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
363 const struct ip_tunnel_key *key = &info->key;
364 struct dst_entry *dst = NULL;
365 struct in6_addr saddr, daddr;
/* Hash the flow into a source port within [sport_min, USHRT_MAX]. */
374 sport = udp_flow_src_port(bareudp->net, skb,
375 bareudp->sport_min, USHRT_MAX,
/* Outer IPv6 route lookup, optionally using the per-flow dst cache. */
377 dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
378 key, sport, bareudp->port, key->tos,
380 (struct dst_cache *) &info->dst_cache : NULL);
384 skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
/* Traffic class carries the ECN-encapsulated outer priority. */
387 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
390 skb_scrub_packet(skb, xnet);
393 if (!skb_pull(skb, skb_network_offset(skb)))
/* Make sure there is headroom for link-layer + outer IPv6/UDP headers. */
396 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
397 BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
399 err = skb_cow_head(skb, min_headroom);
403 err = udp_tunnel_handle_offloads(skb, udp_sum);
407 daddr = info->key.u.ipv6.dst;
408 udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
409 &saddr, &daddr, prio, ttl,
410 info->key.label, sport, bareudp->port,
411 !(info->key.tun_flags & TUNNEL_CSUM));
/* Is @proto transmittable on this device?  The configured ethertype always
 * is; in multi-proto mode MPLS_UC also carries MPLS_MC, and ETH_P_IP also
 * carries ETH_P_IPV6 (mirroring the rx-side rules). */
419 static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
421 if (bareudp->ethertype == proto)
424 if (!bareudp->multi_proto_mode)
427 if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
428 proto == htons(ETH_P_MPLS_MC))
431 if (bareudp->ethertype == htons(ETH_P_IP) &&
432 proto == htons(ETH_P_IPV6))
/* ndo_start_xmit: validate the payload protocol and tunnel metadata, then
 * dispatch to the IPv4 or IPv6 encapsulation path.  The tail lines update
 * error statistics keyed on the encapsulation error code. */
438 static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
440 struct bareudp_dev *bareudp = netdev_priv(dev);
441 struct ip_tunnel_info *info = NULL;
444 if (!bareudp_proto_valid(bareudp, skb->protocol)) {
/* collect_md only: a tunnel-info dst with TX metadata is mandatory. */
449 info = skb_tunnel_info(skb);
450 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
456 if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
457 err = bareudp6_xmit_skb(skb, dev, bareudp, info);
459 err = bareudp_xmit_skb(skb, dev, bareudp, info);
/* Map encap errors onto netdev stats counters. */
469 dev->stats.collisions++;
470 else if (err == -ENETUNREACH)
471 dev->stats.tx_carrier_errors++;
473 dev->stats.tx_errors++;
/* ndo_fill_metadata_dst: resolve the outer route for @skb and fill in the
 * tunnel key's source address and UDP ports, so that upper layers (e.g.
 * OVS) can pre-compute the egress tunnel parameters. */
477 static int bareudp_fill_metadata_dst(struct net_device *dev,
480 struct ip_tunnel_info *info = skb_tunnel_info(skb);
481 struct bareudp_dev *bareudp = netdev_priv(dev);
485 use_cache = ip_tunnel_dst_cache_usable(skb, info);
/* Same flow-hashed source-port selection as the transmit paths. */
486 sport = udp_flow_src_port(bareudp->net, skb,
487 bareudp->sport_min, USHRT_MAX,
490 if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
494 rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr,
495 &info->key, sport, bareudp->port,
497 use_cache ? &info->dst_cache : NULL);
502 info->key.u.ipv4.src = saddr;
503 } else if (ip_tunnel_info_af(info) == AF_INET6) {
504 struct dst_entry *dst;
505 struct in6_addr saddr;
506 struct socket *sock = rcu_dereference(bareudp->sock);
508 dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
509 0, &saddr, &info->key,
510 sport, bareudp->port, info->key.tos,
511 use_cache ? &info->dst_cache : NULL);
516 info->key.u.ipv6.src = saddr;
/* Report the UDP 4-tuple ports chosen above. */
521 info->key.tp_src = sport;
522 info->key.tp_dst = bareudp->port;
/* netdev operations vector wiring the handlers defined above. */
526 static const struct net_device_ops bareudp_netdev_ops = {
527 .ndo_init = bareudp_init,
528 .ndo_uninit = bareudp_uninit,
529 .ndo_open = bareudp_open,
530 .ndo_stop = bareudp_stop,
531 .ndo_start_xmit = bareudp_xmit,
532 .ndo_get_stats64 = dev_get_tstats64,
533 .ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
/* Netlink attribute policy for IFLA_BAREUDP_* link attributes. */
536 static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
537 [IFLA_BAREUDP_PORT] = { .type = NLA_U16 },
538 [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 },
539 [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 },
540 [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG },
543 /* Info for udev, that this is a virtual tunnel endpoint */
/* NOTE(review): the .name initializer line is elided in this chunk. */
544 static const struct device_type bareudp_type = {
548 /* Initialize the device structure. */
549 static void bareudp_setup(struct net_device *dev)
551 dev->netdev_ops = &bareudp_netdev_ops;
552 dev->needs_free_netdev = true;
553 SET_NETDEV_DEVTYPE(dev, &bareudp_type);
/* Software offload capabilities: scatter/gather, checksum, GSO. */
554 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
555 dev->features |= NETIF_F_RXCSUM;
556 dev->features |= NETIF_F_LLTX;
557 dev->features |= NETIF_F_GSO_SOFTWARE;
558 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
559 dev->hw_features |= NETIF_F_RXCSUM;
560 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
/* Raw IP-level device: no link-layer header, no hardware address. */
561 dev->hard_header_len = 0;
563 dev->mtu = ETH_DATA_LEN;
564 dev->min_mtu = IPV4_MIN_MTU;
565 dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
566 dev->type = ARPHRD_NONE;
568 dev->priv_flags |= IFF_NO_QUEUE;
569 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
/* rtnl_link_ops validate callback.
 * NOTE(review): the condition guarding this error (presumably a check
 * that the data attribute array is present) is elided in this chunk. */
572 static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
573 struct netlink_ext_ack *extack)
576 NL_SET_ERR_MSG(extack,
577 "Not enough attributes provided to perform the operation")
/* Parse the IFLA_BAREUDP_* netlink attributes into @conf.  PORT and
 * ETHERTYPE are mandatory; SRCPORT_MIN and MULTIPROTO_MODE are optional
 * (conf is zeroed first, so they default to 0/false). */
583 static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
584 struct netlink_ext_ack *extack)
586 memset(conf, 0, sizeof(*conf));
588 if (!data[IFLA_BAREUDP_PORT]) {
589 NL_SET_ERR_MSG(extack, "port not specified");
592 if (!data[IFLA_BAREUDP_ETHERTYPE]) {
593 NL_SET_ERR_MSG(extack, "ethertype not specified");
/* Attributes are u16 on the wire; port/ethertype are big-endian values. */
597 conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
598 conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
600 if (data[IFLA_BAREUDP_SRCPORT_MIN])
601 conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
603 if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
604 conf->multi_proto_mode = true;
/* Find an existing bareudp device in @bn using the same UDP port as
 * @conf, or NULL if none (caller holds the appropriate lock). */
609 static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
610 const struct bareudp_conf *conf)
612 struct bareudp_dev *bareudp, *t = NULL;
614 list_for_each_entry(bareudp, &bn->bareudp_list, next) {
615 if (conf->port == bareudp->port)
/* Apply @conf to a new device: reject duplicate ports and invalid
 * multi-proto ethertypes, copy the config into the private area,
 * register the netdevice and link it onto the per-netns list. */
621 static int bareudp_configure(struct net *net, struct net_device *dev,
622 struct bareudp_conf *conf,
623 struct netlink_ext_ack *extack)
625 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
626 struct bareudp_dev *t, *bareudp = netdev_priv(dev);
/* Each UDP port may back at most one bareudp device per namespace. */
631 t = bareudp_find_dev(bn, conf);
633 NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
/* multiproto is only meaningful for IPv4 and unicast-MPLS ethertypes. */
637 if (conf->multi_proto_mode &&
638 (conf->ethertype != htons(ETH_P_MPLS_UC) &&
639 conf->ethertype != htons(ETH_P_IP))) {
640 NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
644 bareudp->port = conf->port;
645 bareudp->ethertype = conf->ethertype;
646 bareudp->sport_min = conf->sport_min;
647 bareudp->multi_proto_mode = conf->multi_proto_mode;
649 err = register_netdevice(dev);
653 list_add(&bareudp->next, &bn->bareudp_list);
/* Apply generic link attributes (currently just IFLA_MTU) after the
 * device has been registered. */
657 static int bareudp_link_config(struct net_device *dev,
663 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
/* rtnl dellink: unlink from the per-netns list and queue the device for
 * unregistration (batched via @head when non-NULL). */
670 static void bareudp_dellink(struct net_device *dev, struct list_head *head)
672 struct bareudp_dev *bareudp = netdev_priv(dev);
674 list_del(&bareudp->next);
675 unregister_netdevice_queue(dev, head);
/* rtnl newlink: parse attributes, configure/register the device, then
 * apply the generic link config; tears the device down again if the
 * post-registration config step fails. */
678 static int bareudp_newlink(struct net *net, struct net_device *dev,
679 struct nlattr *tb[], struct nlattr *data[],
680 struct netlink_ext_ack *extack)
682 struct bareudp_conf conf;
685 err = bareudp2info(data, &conf, extack);
689 err = bareudp_configure(net, dev, &conf, extack);
693 err = bareudp_link_config(dev, tb);
/* Error unwind: undo bareudp_configure() on link-config failure. */
700 bareudp_dellink(dev, NULL);
/* Upper bound on the netlink attribute payload emitted by
 * bareudp_fill_info(); must stay in sync with it. */
704 static size_t bareudp_get_size(const struct net_device *dev)
706 return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */
707 nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */
708 nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */
709 nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */
/* Dump the device configuration as IFLA_BAREUDP_* attributes; the flag
 * attribute is emitted only when multi-proto mode is enabled. */
713 static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
715 struct bareudp_dev *bareudp = netdev_priv(dev);
717 if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
718 goto nla_put_failure;
719 if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
720 goto nla_put_failure;
721 if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
722 goto nla_put_failure;
723 if (bareudp->multi_proto_mode &&
724 nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
725 goto nla_put_failure;
/* rtnl_link_ops: ties the netlink link type to the handlers above.
 * NOTE(review): the .kind initializer line is elided in this chunk. */
733 static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
735 .maxtype = IFLA_BAREUDP_MAX,
736 .policy = bareudp_policy,
737 .priv_size = sizeof(struct bareudp_dev),
738 .setup = bareudp_setup,
739 .validate = bareudp_validate,
740 .newlink = bareudp_newlink,
741 .dellink = bareudp_dellink,
742 .get_size = bareudp_get_size,
743 .fill_info = bareudp_fill_info,
/* Per-netns init: start with an empty device list. */
746 static __net_init int bareudp_init_net(struct net *net)
748 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
750 INIT_LIST_HEAD(&bn->bareudp_list);
/* Queue every bareudp device of @net onto @head for batched
 * unregistration (used on namespace teardown). */
754 static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
756 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
757 struct bareudp_dev *bareudp, *next;
759 list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
760 unregister_netdevice_queue(bareudp->dev, head);
/* Batched netns exit: collect all tunnels of the dying namespaces, then
 * unregister them in one rtnl transaction. */
763 static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
769 list_for_each_entry(net, net_list, exit_list)
770 bareudp_destroy_tunnels(net, &list);
772 /* unregister the devices gathered above */
773 unregister_netdevice_many(&list);
/* Per-network-namespace lifecycle hooks for the module. */
777 static struct pernet_operations bareudp_net_ops = {
778 .init = bareudp_init_net,
779 .exit_batch = bareudp_exit_batch_net,
780 .id = &bareudp_net_id,
781 .size = sizeof(struct bareudp_net),
/* Module init: register pernet ops first, then the rtnl link type;
 * unwinds the pernet registration if link registration fails. */
784 static int __init bareudp_init_module(void)
788 rc = register_pernet_subsys(&bareudp_net_ops);
792 rc = rtnl_link_register(&bareudp_link_ops);
798 unregister_pernet_subsys(&bareudp_net_ops);
/* late_initcall: run after the networking core is fully initialized. */
802 late_initcall(bareudp_init_module);
/* Module exit: unregister in reverse order of bareudp_init_module(). */
804 static void __exit bareudp_cleanup_module(void)
806 rtnl_link_unregister(&bareudp_link_ops);
807 unregister_pernet_subsys(&bareudp_net_ops);
809 module_exit(bareudp_cleanup_module);
/* Module metadata; the rtnl alias lets "ip link add type bareudp"
 * autoload this module. */
811 MODULE_ALIAS_RTNL_LINK("bareudp");
812 MODULE_LICENSE("GPL");
813 MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
814 MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");