drivers/net/wireguard/device.c (GNU Linux-libre 5.19-rc6-gnu)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

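/* ndo_open handler: disables IPv4 ICMP redirects and IPv6 address
 * generation on the interface, binds the UDP socket to the configured
 * listen port, then flushes any packets staged while the device was
 * down and sends keepalives for peers that have them enabled.
 */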
static int wg_open(struct net_device *dev)
{
        struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
        struct inet6_dev *dev_v6 = __in6_dev_get(dev);
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
        int ret;

        if (dev_v4) {
                /* At some point we might put this check near the ip_rt_send_
                 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
                 * to the current secpath check.
                 */
                IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
                IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
        }
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

        mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
                goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
out:
        mutex_unlock(&wg->device_update_lock);
        return ret;
}

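/* Suspend/hibernate handler: ephemeral session keys are zeroed before the
 * machine sleeps, so stale key material cannot outlive the event; this
 * does eagerly what the timer_zero_key_material timer (deleted below)
 * would otherwise do after its expiry.
 */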
static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        /* If the machine is constantly suspending and resuming, as part of
         * its normal operation rather than as a somewhat rare event, then we
         * don't actually want to clear keys.
         */
        if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
                return 0;

        if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
                return 0;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                mutex_lock(&wg->device_update_lock);
                list_for_each_entry(peer, &wg->peer_list, peer_list) {
                        del_timer(&peer->timer_zero_key_material);
                        wg_noise_handshake_clear(&peer->handshake);
                        wg_noise_keypairs_clear(&peer->keypairs);
                }
                mutex_unlock(&wg->device_update_lock);
        }
        rtnl_unlock();
        rcu_barrier();
        return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };

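/* VM fork handler: after a virtual machine is cloned or restored from a
 * snapshot, nonce counters could repeat under the same keys, so current
 * keypairs are expired to force a fresh handshake before any further
 * transport data is sent.
 */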
static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                mutex_lock(&wg->device_update_lock);
                list_for_each_entry(peer, &wg->peer_list, peer_list)
                        wg_noise_expire_current_peer_keypairs(peer);
                mutex_unlock(&wg->device_update_lock);
        }
        rtnl_unlock();
        return 0;
}

static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };

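/* ndo_stop handler: purges staged and queued handshake packets, stops the
 * per-peer timers, and zeroes all session state, leaving only the static
 * configuration behind.
 */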
static int wg_stop(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
        struct sk_buff *skb;

        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_purge_staged_packets(peer);
                wg_timers_stop(peer);
                wg_noise_handshake_clear(&peer->handshake);
                wg_noise_keypairs_clear(&peer->keypairs);
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
        while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
                kfree_skb(skb);
        atomic_set(&wg->handshake_queue_len, 0);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
}

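/* Transmit path. The destination IP selects the peer via the allowed-IPs
 * lookup (WireGuard's "cryptokey routing"); GSO superpackets are segmented
 * here because each segment becomes its own encrypted transport message.
 * Packets are then staged per peer, to be sent in a burst once a valid
 * keypair exists, with ICMP unreachable replies generated on failure.
 */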
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct sk_buff_head packets;
        struct wg_peer *peer;
        struct sk_buff *next;
        sa_family_t family;
        u32 mtu;
        int ret;

        if (unlikely(!wg_check_packet_protocol(skb))) {
                ret = -EPROTONOSUPPORT;
                net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
                goto err;
        }

        peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
        if (unlikely(!peer)) {
                ret = -ENOKEY;
                if (skb->protocol == htons(ETH_P_IP))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
                                            dev->name, &ip_hdr(skb)->daddr);
                else if (skb->protocol == htons(ETH_P_IPV6))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
                                            dev->name, &ipv6_hdr(skb)->daddr);
                goto err_icmp;
        }

        family = READ_ONCE(peer->endpoint.addr.sa_family);
        if (unlikely(family != AF_INET && family != AF_INET6)) {
                ret = -EDESTADDRREQ;
                net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
                                    dev->name, peer->internal_id);
                goto err_peer;
        }

        mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        __skb_queue_head_init(&packets);
        if (!skb_is_gso(skb)) {
                skb_mark_not_on_list(skb);
        } else {
                struct sk_buff *segs = skb_gso_segment(skb, 0);

                if (IS_ERR(segs)) {
                        ret = PTR_ERR(segs);
                        goto err_peer;
                }
                dev_kfree_skb(skb);
                skb = segs;
        }

        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        continue;

                /* We only need to keep the original dst around for icmp,
                 * so at this point we're in a position to drop it.
                 */
                skb_dst_drop(skb);

                PACKET_CB(skb)->mtu = mtu;

                __skb_queue_tail(&packets, skb);
        }

        spin_lock_bh(&peer->staged_packet_queue.lock);
        /* If the queue is getting too big, we start removing the oldest packets
         * until it's small again. We do this before adding the new packet, so
         * we don't remove GSO segments that are in excess.
         */
        while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
                dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
                ++dev->stats.tx_dropped;
        }
        skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        wg_packet_send_staged_packets(peer);

        wg_peer_put(peer);
        return NETDEV_TX_OK;

err_peer:
        wg_peer_put(peer);
err_icmp:
        if (skb->protocol == htons(ETH_P_IP))
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
        else if (skb->protocol == htons(ETH_P_IPV6))
                icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
        ++dev->stats.tx_errors;
        kfree_skb(skb);
        return ret;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open               = wg_open,
        .ndo_stop               = wg_stop,
        .ndo_start_xmit         = wg_xmit,
        .ndo_get_stats64        = dev_get_tstats64
};

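/* priv_destructor for the netdevice. Teardown order matters here: the
 * socket is shut down and peers removed before the workqueues are
 * destroyed, so no worker can still reference freed state, and the
 * rcu_barrier() waits out RCU-deferred peer frees before the hashtables
 * themselves are released.
 */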
static void wg_destruct(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);

        rtnl_lock();
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
        rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
        wg_peer_remove_all(wg);
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
        wg_packet_queue_free(&wg->handshake_queue, true);
        wg_packet_queue_free(&wg->decrypt_queue, false);
        wg_packet_queue_free(&wg->encrypt_queue, false);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
        free_percpu(dev->tstats);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);

        pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

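/* rtnl link setup callback. The MTU arithmetic yields the familiar default
 * of 1420: MESSAGE_MINIMUM_LENGTH (a 16-byte data message header plus a
 * 16-byte AEAD tag, i.e. 32) + 8 bytes of UDP header + 40 bytes of IPv6
 * header (the larger of the two IP headers) = 80 bytes of worst-case
 * overhead, subtracted from ETH_DATA_LEN (1500).
 */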
static void wg_setup(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                    NETIF_F_SG | NETIF_F_GSO |
                                    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
        const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

        dev->netdev_ops = &netdev_ops;
        dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
        dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
        dev->type = ARPHRD_NONE;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->features |= NETIF_F_LLTX;
        dev->features |= WG_NETDEV_FEATURES;
        dev->hw_features |= WG_NETDEV_FEATURES;
        dev->hw_enc_features |= WG_NETDEV_FEATURES;
        dev->mtu = ETH_DATA_LEN - overhead;
        dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

        SET_NETDEV_DEVTYPE(dev, &device_type);

        /* We need to keep the dst around in case of icmp replies. */
        netif_keep_dst(dev);

        memset(wg, 0, sizeof(*wg));
        wg->dev = dev;
}

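/* newlink callback: hashtables, per-CPU stats, workqueues and packet
 * queues are allocated in an order that exactly mirrors the unwind labels
 * below, so each error path tears down only what has already been set up.
 */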
static int wg_newlink(struct net *src_net, struct net_device *dev,
                      struct nlattr *tb[], struct nlattr *data[],
                      struct netlink_ext_ack *extack)
{
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;

        rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
        wg->device_update_gen = 1;

        wg->peer_hashtable = wg_pubkey_hashtable_alloc();
        if (!wg->peer_hashtable)
                return ret;

        wg->index_hashtable = wg_index_hashtable_alloc();
        if (!wg->index_hashtable)
                goto err_free_peer_hashtable;

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                goto err_free_index_hashtable;

        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
                goto err_free_tstats;

        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_send_wq)
                goto err_destroy_handshake_receive;

        wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
                        WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
        if (!wg->packet_crypt_wq)
                goto err_destroy_handshake_send;

        ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
                                   MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_destroy_packet_crypt;

        ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
                                   MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_free_encrypt_queue;

        ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
                                   MAX_QUEUED_INCOMING_HANDSHAKES);
        if (ret < 0)
                goto err_free_decrypt_queue;

        ret = wg_ratelimiter_init();
        if (ret < 0)
                goto err_free_handshake_queue;

        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;

        list_add(&wg->device_list, &device_list);

        /* We wait until the end to assign priv_destructor, so that
         * register_netdevice doesn't call it for us if it fails.
         */
        dev->priv_destructor = wg_destruct;

        pr_debug("%s: Interface created\n", dev->name);
        return ret;

err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
err_free_handshake_queue:
        wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
        wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
        wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
err_free_tstats:
        free_percpu(dev->tstats);
err_free_index_hashtable:
        kvfree(wg->index_hashtable);
err_free_peer_hashtable:
        kvfree(wg->peer_hashtable);
        return ret;
}

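/* Registering these link ops is what makes, for example,
 *   ip link add dev wg0 type wireguard
 * work: the "wireguard" type is KBUILD_MODNAME, matched against .kind.
 */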
static struct rtnl_link_ops link_ops __read_mostly = {
        .kind                   = KBUILD_MODNAME,
        .priv_size              = sizeof(struct wg_device),
        .setup                  = wg_setup,
        .newlink                = wg_newlink,
};

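/* When the namespace a device was created in goes away, the device itself
 * may live on in another namespace; only its socket and cached endpoint
 * sources are torn down here, and carrier is dropped to signal the loss.
 */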
static void wg_netns_pre_exit(struct net *net)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                if (rcu_access_pointer(wg->creating_net) == net) {
                        pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
                        netif_carrier_off(wg->dev);
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
                        list_for_each_entry(peer, &wg->peer_list, peer_list)
                                wg_socket_clear_peer_endpoint_src(peer);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
        rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
        .pre_exit = wg_netns_pre_exit
};

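/* Module init for the device layer: the notifiers, pernet ops and link ops
 * are registered in sequence, with wg_device_uninit() below undoing them
 * in exactly the reverse order.
 */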
int __init wg_device_init(void)
{
        int ret;

        ret = register_pm_notifier(&pm_notifier);
        if (ret)
                return ret;

        ret = register_random_vmfork_notifier(&vm_notifier);
        if (ret)
                goto error_pm;

        ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_vm;

        ret = rtnl_link_register(&link_ops);
        if (ret)
                goto error_pernet;

        return 0;

error_pernet:
        unregister_pernet_device(&pernet_ops);
error_vm:
        unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
        unregister_pm_notifier(&pm_notifier);
        return ret;
}

void wg_device_uninit(void)
{
        rtnl_link_unregister(&link_ops);
        unregister_pernet_device(&pernet_ops);
        unregister_random_vmfork_notifier(&vm_notifier);
        unregister_pm_notifier(&pm_notifier);
        rcu_barrier();
}