// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NETLINK	Kernel-user communication protocol.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Patrick McHardy <kaber@trash.net>
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *	added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *	use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *	- inc module use count of module that owns
 *	  the kernel socket in case userspace opens
 *	  socket of same protocol
 *	- remove all module support, since netlink is
 *	  mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

#define NETLINK_S_CONGESTED		0x0
static inline int netlink_is_kernel(struct sock *sk)
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];

static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
	"nlk_cb_mutex-USERSOCK",
	"nlk_cb_mutex-FIREWALL",
	"nlk_cb_mutex-SOCK_DIAG",
	"nlk_cb_mutex-SELINUX",
	"nlk_cb_mutex-ISCSI",
	"nlk_cb_mutex-AUDIT",
	"nlk_cb_mutex-FIB_LOOKUP",
	"nlk_cb_mutex-CONNECTOR",
	"nlk_cb_mutex-NETFILTER",
	"nlk_cb_mutex-IP6_FW",
	"nlk_cb_mutex-DNRTMSG",
	"nlk_cb_mutex-KOBJECT_UEVENT",
	"nlk_cb_mutex-GENERIC",
	"nlk_cb_mutex-SCSITRANSPORT",
	"nlk_cb_mutex-ECRYPTFS",
	"nlk_cb_mutex-CRYPTO",
	"nlk_cb_mutex-MAX_LINKS"
};

static int netlink_dump(struct sock *sk);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
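
/* A minimal sketch of the resulting usage pattern (illustrative only; the
 * real callers are the bind/setsockopt paths further down in this file):
 *
 *	netlink_table_grab();		// exclusive writer
 *	// ... mutate nl_table[] state, listener bitmaps ...
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();		// shared reader
 *	// ... read nl_table[] state ...
 *	netlink_unlock_table();
 */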
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static BLOCKING_NOTIFIER_HEAD(netlink_chain);

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
	return group ? 1 << (group - 1) : 0;
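
/* For example, netlink_group_mask(1) == 0x1 and netlink_group_mask(3) ==
 * 0x4, while group 0 ("no group") yields an empty mask.
 */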
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
	unsigned int len = skb_end_offset(skb);

	new = alloc_skb(len, gfp_mask);

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	skb_put_data(new, skb->data, len);

static unsigned int netlink_tap_net_id;

struct netlink_tap_net {
	struct list_head netlink_tap_all;
	struct mutex netlink_tap_lock;
};

int netlink_add_tap(struct netlink_tap *nt)
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	if (unlikely(nt->dev->type != ARPHRD_NETLINK))

	mutex_lock(&nn->netlink_tap_lock);
	list_add_rcu(&nt->list, &nn->netlink_tap_all);
	mutex_unlock(&nn->netlink_tap_lock);

	__module_get(nt->module);

EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
	struct net *net = dev_net(nt->dev);
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
	struct netlink_tap *tmp;

	mutex_lock(&nn->netlink_tap_lock);

	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
		list_del_rcu(&nt->list);

	pr_warn("__netlink_remove_tap: %p not found\n", nt);

	mutex_unlock(&nn->netlink_tap_lock);

	module_put(nt->module);

	return found ? 0 : -ENODEV;

int netlink_remove_tap(struct netlink_tap *nt)
	ret = __netlink_remove_tap(nt);

EXPORT_SYMBOL_GPL(netlink_remove_tap);

static __net_init int netlink_tap_init_net(struct net *net)
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	INIT_LIST_HEAD(&nn->netlink_tap_all);
	mutex_init(&nn->netlink_tap_lock);

static struct pernet_operations netlink_tap_net_ops = {
	.init = netlink_tap_init_net,
	.id   = &netlink_tap_net_id,
	.size = sizeof(struct netlink_tap_net),
};

static bool netlink_filter_tap(const struct sk_buff *skb)
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
		nskb = skb_clone(skb, GFP_ATOMIC);

	nskb->protocol = htons((u16) sk->sk_protocol);
	nskb->pkt_type = netlink_is_kernel(sk) ?
			 PACKET_KERNEL : PACKET_USER;
	skb_reset_network_header(nskb);
	ret = dev_queue_xmit(nskb);
	if (unlikely(ret > 0))
		ret = net_xmit_errno(ret);

static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))

	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);

static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);

	if (unlikely(!list_empty(&nn->netlink_tap_all)))
		__netlink_deliver_tap(skb, nn);

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(sock_net(dst), skb);

static void netlink_overrun(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
	atomic_inc(&sk->sk_drops);

static void netlink_rcv_wake(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);

static void netlink_skb_destructor(struct sk_buff *skb)
	if (is_vmalloc_addr(skb->head)) {
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
	WARN_ON(skb->sk != NULL);

	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

static void netlink_sock_destruct(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		nlk->cb.done(&nlk->cb);
		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);

static void netlink_sock_destruct_work(struct work_struct *work)
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
						work);

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
			write_unlock_irq(&nl_table_lock);
			write_lock_irq(&nl_table_lock);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);

netlink_lock_table(void)
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock_irqsave(&nl_table_lock, flags);
	atomic_inc(&nl_table_users);
	read_unlock_irqrestore(&nl_table_lock, flags);

netlink_unlock_table(void)
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
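
/* Why offsetof() instead of sizeof(): assuming CONFIG_NET_NS=y, so that
 * possible_net_t carries a struct net pointer, the 64-bit layout is:
 *
 *	struct netlink_compare_arg {
 *		possible_net_t	pnet;	// offset 0, 8 bytes
 *		u32		portid;	// offset 8, 4 bytes
 *	};				// sizeof() == 16 (tail padding)
 *
 * Hashing or comparing sizeof() bytes would cover 4 bytes of padding;
 * offsetof(portid) + sizeof(u32) == 12 covers exactly the key material.
 */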
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    netlink_rhashtable_params);

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
	struct netlink_table *table = &nl_table[protocol];

	sk = __netlink_lookup(table, portid, net);

static const struct proto_ops netlink_ops;

netlink_update_listeners(struct sock *sk)
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		listeners->masks[i] = mask;

	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */

static int netlink_insert(struct sock *sk, u32 portid)
	struct netlink_table *table = &nl_table[sk->sk_protocol];

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)

	/* portid can be read locklessly from netlink_getname(). */
	WRITE_ONCE(nlk_sk(sk)->portid, portid);

	err = __netlink_insert(table, sk);

	/* In case the hashtable backend returns with -EBUSY
	 * from here, it must not escape to the caller.
	 */
	if (unlikely(err == -EBUSY))

	/* We need to ensure that the socket is hashed and visible. */

	/* Paired with lockless reads from netlink_bind(),
	 * netlink_connect() and netlink_sendmsg().
	 */
	WRITE_ONCE(nlk_sk(sk)->bound, portid);

static void netlink_remove(struct sock *sk)
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();

static struct proto netlink_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);

	sock_init_data(sock, sk);

		nlk->cb_mutex = cb_mutex;
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
		lockdep_set_class_and_name(nlk->cb_mutex,
					   nlk_cb_mutex_keys + protocol,
					   nlk_cb_mutex_key_strings[protocol]);
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;
	protocol = array_index_nospec(protocol, MAX_LINKS);

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);

	sock_prot_inuse_add(net, &netlink_proto, 1);

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;

static void deferred_put_nlk_sk(struct rcu_head *head)
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
	struct sock *sk = &nlk->sk;

	if (!refcount_dec_and_test(&sk->sk_refcnt))

	if (nlk->cb_running && nlk->cb.done) {
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
		schedule_work(&nlk->work);

static int netlink_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid && nlk->bound) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		blocking_notifier_call_chain(&netlink_chain,
					     NETLINK_URELEASE, &n);

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		netlink_table_ungrab();

	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);

	call_rcu(&nlk->rcu, deferred_put_nlk_sk);

static int netlink_autobind(struct socket *sock)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);

	ok = !__netlink_lookup(table, portid, net);

	/* Bind collision, search negative portid values. */
		/* rover will be in range [S32_MIN, -4097] */
		rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
	else if (rover >= -4096)

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)

	/* If 2 threads race to autobind, that is fine. */

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in the
 * user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap in all
 * user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, &init_user_ns, cap);
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
EXPORT_SYMBOL(netlink_net_capable);
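
/* Illustrative use from a message handler; the handler name and message
 * semantics are assumptions, not part of this file:
 *
 *	static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				struct netlink_ext_ack *extack)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		// ... handle the request ...
 *		return 0;
 *	}
 */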
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);

netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;

static int netlink_realloc_groups(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned long *new_groups;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {

	if (nlk->ngroups >= groups)

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
	netlink_table_ungrab();

static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!nlk->netlink_unbind)

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	unsigned long groups;

	if (addr_len < sizeof(struct sockaddr_nl))

	if (nladdr->nl_family != AF_NETLINK)
	groups = nladdr->nl_groups;

	/* Only the superuser is allowed to listen to multicasts */
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
		err = netlink_realloc_groups(sk);

	if (nlk->ngroups < BITS_PER_LONG)
		groups &= (1UL << nlk->ngroups) - 1;

	/* Paired with WRITE_ONCE() in netlink_insert() */
	bound = READ_ONCE(nlk->bound);
		/* Ensure nlk->portid is up-to-date. */
		if (nladdr->nl_pid != nlk->portid)

	netlink_lock_table();
	if (nlk->netlink_bind && groups) {
		/* nl_groups is a u32, so cap the maximum groups we can bind */
		for (group = 0; group < BITS_PER_TYPE(u32); group++) {
			if (!test_bit(group, &groups))
			err = nlk->netlink_bind(net, group + 1);
			netlink_undo_bind(group, groups, sk);

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
	netlink_unlock_table();

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	netlink_unlock_table();

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))

	if (addr->sa_family == AF_UNSPEC) {
		/* paired with READ_ONCE() in netlink_getsockbyportid() */
		WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
		/* dst_portid and dst_group can be read locklessly */
		WRITE_ONCE(nlk->dst_portid, 0);
		WRITE_ONCE(nlk->dst_group, 0);
	if (addr->sa_family != AF_NETLINK)

	if (alen < sizeof(struct sockaddr_nl))

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))

	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 * Paired with WRITE_ONCE() in netlink_insert().
	 */
	if (!READ_ONCE(nlk->bound))
		err = netlink_autobind(sock);

	/* paired with READ_ONCE() in netlink_getsockbyportid() */
	WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
	/* dst_portid and dst_group can be read locklessly */
	WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
	WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int peer)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;

		/* Paired with WRITE_ONCE() in netlink_connect() */
		nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
		nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
		/* Paired with WRITE_ONCE() in netlink_insert() */
		nladdr->nl_pid = READ_ONCE(nlk->portid);
		netlink_lock_table();
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
		netlink_unlock_table();
	return sizeof(*nladdr);

static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
	/* try to hand this ioctl down to the NIC drivers.
	 */
	return -ENOIOCTLCMD;

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	/* dst_portid and sk_state can be changed in netlink_connect() */
	if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
	    READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
		return ERR_PTR(-ECONNREFUSED);

struct sock *netlink_getsockbyfilp(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
	struct sk_buff *skb;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);

	skb = __build_skb(data, size);

	skb->destructor = netlink_skb_destructor;

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all
 * error checks are performed and memory in the queue is reserved.
 *
 * < 0: error. skb freed, reference to sock dropped.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
	struct netlink_sock *nlk;

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
		DECLARE_WAITQUEUE(wait, current);

		if (!ssk || netlink_is_kernel(ssk))
			netlink_overrun(sk);

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);

		if (signal_pending(current)) {
			return sock_intr_errno(*timeo);

	netlink_skb_set_owner_r(skb, sk);

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	netlink_deliver_tap(sock_net(sk), skb);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	int len = __netlink_sendskb(sk, skb);

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
	WARN_ON(skb->sk != NULL);

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);

	pskb_expand_head(skb, 0, -delta,
			 (allocation & ~__GFP_DIRECT_RECLAIM) |
			 __GFP_NOWARN | __GFP_NORETRY);

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);

	sk = netlink_getsockbyportid(ssk, portid);

	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {

	err = netlink_attachskb(sk, skb, &timeo, ssk);

	return netlink_sendskb(sk, skb);
EXPORT_SYMBOL(netlink_unicast);
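
/* Sketch of a kernel-side reply via netlink_unicast(); everything named
 * "example_" is an assumption for illustration:
 *
 *	struct sk_buff *skb = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	nlh = nlmsg_put(skb, 0, req_nlh->nlmsg_seq, EXAMPLE_MSG_TYPE,
 *			payload_len, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_len);
 *	nlmsg_end(skb, nlh);
 *	return netlink_unicast(example_kernel_sk, skb,
 *			       NETLINK_CB(req_skb).portid, MSG_DONTWAIT);
 *
 * On success netlink_unicast() consumes the skb and returns the length
 * delivered; on error it frees the skb and returns a negative errno.
 */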
int netlink_has_listeners(struct sock *sk, unsigned int group)
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

EXPORT_SYMBOL_GPL(netlink_has_listeners);

bool netlink_strict_get_check(struct sk_buff *skb)
	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);

	return nlk->flags & NETLINK_F_STRICT_CHK;
EXPORT_SYMBOL_GPL(netlink_strict_get_check);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	int delivery_failure;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (p->exclude_sk == sk)

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))

		if (!peernet_has_id(sock_net(sk), p->net))

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,

		netlink_overrun(sk);

	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);

	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;

	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {

	if (sk_filter(sk, p->skb2)) {

	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
		NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);

		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		p->congested |= val;

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
			       u32 group, gfp_t allocation,
			       int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
			       void *filter_data)
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.portid = portid;
	info.delivery_failure = 0;
	info.allocation = allocation;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */
	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && gfpflags_allow_blocking(allocation))

EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
EXPORT_SYMBOL(netlink_broadcast);
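
/* Sketch of a kernel-side event broadcast (the group constant is an
 * assumption); portid 0 means "do not exclude any listener":
 *
 *	netlink_broadcast(example_kernel_sk, skb, 0, EXAMPLE_GRP_EVENT,
 *			  GFP_KERNEL);
 *
 * The return value is 0 when at least one listener received the message
 * and -ESRCH when none did, so callers often ignore -ESRCH.
 */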
struct netlink_set_err_data {
	struct sock *exclude_sk;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {

	sk->sk_err = p->code;
	sk->sk_error_report(sk);

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
	struct netlink_set_err_data info;
	unsigned long flags;

	info.exclude_sk = ssk;
	info.portid = portid;
	/* sk->sk_err wants a positive error value */

	read_lock_irqsave(&nl_table_lock, flags);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock_irqrestore(&nl_table_lock, flags);
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group, int is_new)
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
		__set_bit(group - 1, nlk->groups);
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))

	case NETLINK_PKTINFO:
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
		err = netlink_realloc_groups(sk);
		if (!val || val - 1 >= nlk->ngroups)
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);
	case NETLINK_BROADCAST_ERROR:
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
	case NETLINK_NO_ENOBUFS:
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
	case NETLINK_CAP_ACK:
			nlk->flags |= NETLINK_F_CAP_ACK;
			nlk->flags &= ~NETLINK_F_CAP_ACK;
	case NETLINK_EXT_ACK:
			nlk->flags |= NETLINK_F_EXT_ACK;
			nlk->flags &= ~NETLINK_F_EXT_ACK;
	case NETLINK_GET_STRICT_CHK:
			nlk->flags |= NETLINK_F_STRICT_CHK;
			nlk->flags &= ~NETLINK_F_STRICT_CHK;

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))

	case NETLINK_PKTINFO:
		flag = NETLINK_F_RECV_PKTINFO;
	case NETLINK_BROADCAST_ERROR:
		flag = NETLINK_F_BROADCAST_SEND_ERROR;
	case NETLINK_NO_ENOBUFS:
		flag = NETLINK_F_RECV_NO_ENOBUFS;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift, err = 0;

		netlink_lock_table();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
		if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
		netlink_unlock_table();
	case NETLINK_CAP_ACK:
		flag = NETLINK_F_CAP_ACK;
	case NETLINK_EXT_ACK:
		flag = NETLINK_F_EXT_ACK;
	case NETLINK_GET_STRICT_CHK:
		flag = NETLINK_F_STRICT_CHK;
		return -ENOPROTOOPT;

	if (len < sizeof(int))

	val = nlk->flags & flag ? 1 : 0;

	if (put_user(len, optlen) ||
	    copy_to_user(optval, &val, len))

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
	if (!NETLINK_CB(skb).nsid_is_set)

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	struct sk_buff *skb;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags & MSG_OOB)

		pr_warn_once("Zero length message leads to an empty skb\n");

	err = scm_send(sock, msg, &scm, true);

	if (msg->msg_namelen) {
		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
		if (addr->nl_family != AF_NETLINK)
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		netlink_skb_flags |= NETLINK_SKB_DST;
		/* Paired with WRITE_ONCE() in netlink_connect() */
		dst_portid = READ_ONCE(nlk->dst_portid);
		dst_group = READ_ONCE(nlk->dst_group);

	/* Paired with WRITE_ONCE() in netlink_insert() */
	if (!READ_ONCE(nlk->bound)) {
		err = netlink_autobind(sock);
		/* Ensure nlk is hashed and visible. */

	if (len > sk->sk_sndbuf - 32)

	skb = netlink_alloc_large_skb(len, dst_group);

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= scm.creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {

	err = security_netlink_send(sk, skb);

		refcount_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb, *data_skb;

	skb = skb_recv_datagram(sk, flags, noblock, &err);

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, it means that we will have to
		 * use the frag_list skb's data for compat tasks and the
		 * regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     SKB_WITH_OVERHEAD(32768));

	copied = data_skb->len;
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (READ_ONCE(nlk->cb_running) &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
			sk->sk_error_report(sk);

	scm_recv(sock, msg, &scm, flags);

	netlink_rcv_wake(sk);
	return err ? : copied;

static void netlink_data_ready(struct sock *sk)

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
	struct socket *sock;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	if (unit < 0 || unit >= MAX_LINKS)

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	if (!cfg || cfg->groups < 32)
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			nl_table[unit].compare = cfg->compare;
		nl_table[unit].registered = 1;
		nl_table[unit].registered++;
	netlink_table_ungrab();

	netlink_kernel_release(sk);

out_sock_release_nosk:

EXPORT_SYMBOL(__netlink_kernel_create);
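
/* Sketch of how a subsystem creates its kernel socket through the
 * netlink_kernel_create() wrapper from <linux/netlink.h> (the protocol,
 * handler and group count below are assumptions for illustration):
 *
 *	static void example_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, example_rcv_msg);
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.input	= example_input,
 *		.groups	= EXAMPLE_GRP_MAX,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
 *
 *	if (!nlsk)
 *		return -ENOMEM;
 */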
netlink_kernel_release(struct sock *sk)
	if (sk == NULL || sk->sk_socket == NULL)

	sock_release(sk->sk_socket);

EXPORT_SYMBOL(netlink_kernel_release);

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	tbl->groups = groups;

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);

EXPORT_SYMBOL(__nlmsg_put);

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ext_ack extack = {};
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16KiB allocation
	 * to reduce the number of system calls on dump operations, if the
	 * user ever provided a big enough buffer.
	 */
	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (alloc_min_size < nlk->max_recvmsg_len) {
		alloc_size = nlk->max_recvmsg_len;
		skb = alloc_skb(alloc_size,
				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
				__GFP_NOWARN | __GFP_NORETRY);
		alloc_size = alloc_min_size;
		skb = alloc_skb(alloc_size, GFP_KERNEL);

	/* Trim skb to allocated size. User is expected to provide buffer as
	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
	 * netlink_recvmsg())). dump will pack as many smaller messages as
	 * could fit within the allocated skb. skb is typically allocated
	 * with larger space than required (could be as much as near 2x the
	 * requested size with align to next power of 2 approach). Allowing
	 * dump to use the excess space makes it difficult for a user to have a
	 * reasonable static buffer based on the expected largest dump of a
	 * single netdev. The outcome is MSG_TRUNC error.
	 */
	skb_reserve(skb, skb_tailroom(skb) - alloc_size);

	/* Make sure malicious BPF programs cannot read uninitialized memory
	 * from skb->head -> skb->data.
	 */
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	netlink_skb_set_owner_r(skb, sk);

	if (nlk->dump_done_errno > 0) {
		cb->extack = &extack;
		nlk->dump_done_errno = cb->dump(skb, cb);

	if (nlk->dump_done_errno > 0 ||
	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
			       sizeof(nlk->dump_done_errno),
			       NLM_F_MULTI | cb->answer_flags);

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
	       sizeof(nlk->dump_done_errno));

	if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) {
		nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
		if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
			nlmsg_end(skb, nlh);

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	WRITE_ONCE(nlk->cb_running, false);
	module = cb->module;

	mutex_unlock(nlk->cb_mutex);

	mutex_unlock(nlk->cb_mutex);

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
	struct netlink_sock *nlk, *nlk2;
	struct netlink_callback *cb;

	refcount_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
		ret = -ECONNREFUSED;

	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {

	/* Take a reference on the module that cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;

	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;

	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
	cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);

	if (control->start) {
		ret = control->start(cb);

	WRITE_ONCE(nlk->cb_running, true);
	nlk->dump_done_errno = INT_MAX;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);

	/* We successfully started a dump; by returning -EINTR we
	 * signal not to send an ACK even if it was requested.
	 */

	module_put(control->module);

	mutex_unlock(nlk->cb_mutex);

EXPORT_SYMBOL(__netlink_dump_start);
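
/* Handlers normally start a dump through the netlink_dump_start()
 * wrapper from <linux/netlink.h>; a sketch, with example_dump and
 * example_done as assumed callbacks:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = example_dump,
 *			.done = example_done,
 *		};
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 *
 * The -EINTR return documented above tells the caller that the first
 * dump chunk was already delivered, so no separate ACK is needed.
 */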
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
		 const struct netlink_ext_ack *extack)
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
	unsigned int flags = 0;
	bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message, and get extra error data if
	 * requested.
	 */
	if (nlk_has_extack && extack && extack->_msg)
		tlvlen += nla_total_size(strlen(extack->_msg) + 1);

		if (!(nlk->flags & NETLINK_F_CAP_ACK))
			payload += nlmsg_len(nlh);
			flags |= NLM_F_CAPPED;
		if (nlk_has_extack && extack && extack->bad_attr)
			tlvlen += nla_total_size(sizeof(u32));
		flags |= NLM_F_CAPPED;

	if (nlk_has_extack && extack && extack->cookie_len)
		tlvlen += nla_total_size(extack->cookie_len);

		flags |= NLM_F_ACK_TLVS;

	skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
		NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
		NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk);

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, flags);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));

	if (nlk_has_extack && extack) {
			WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,

		if (extack->bad_attr &&
		    !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
			     (u8 *)extack->bad_attr >= in_skb->data +
			WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
					    (u8 *)extack->bad_attr -
		if (extack->cookie_len)
			WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,

	nlmsg_end(skb, rep);

	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);

EXPORT_SYMBOL(netlink_ack);

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *,
						   struct netlink_ext_ack *))
	struct netlink_ext_ack extack;
	struct nlmsghdr *nlh;

	while (skb->len >= nlmsg_total_size(0)) {
		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)

		err = cb(skb, nlh, &extack);

		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err, &extack);

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);

EXPORT_SYMBOL(netlink_rcv_skb);
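
/* The per-message callback handed to netlink_rcv_skb() has this shape
 * (sketch; the body is an assumption):
 *
 *	static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				   struct netlink_ext_ack *extack)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		// ... dispatch on message type ...
 *		}
 *		return 0;	// 0 or -errno; netlink_rcv_skb() acks as needed
 *	}
 */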
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
	int exclude_portid = 0;

		refcount_inc(&skb->users);
		exclude_portid = portid;

		/* errors are reported via the destination sk->sk_err, but
		 * delivery errors are propagated if the
		 * NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);

		err2 = nlmsg_unicast(sk, skb, portid);

EXPORT_SYMBOL(nlmsg_notify);
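
/* Typical notification after a configuration change (sketch; names are
 * assumptions): echo back to the requester only when NLM_F_ECHO was set,
 * and multicast to interested listeners:
 *
 *	return nlmsg_notify(nlsk, skb, NETLINK_CB(req_skb).portid,
 *			    EXAMPLE_GRP_EVENT,
 *			    nlmsg_report(req_nlh), GFP_KERNEL);
 */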
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static void netlink_walk_start(struct nl_seq_iter *iter)
	rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
	rhashtable_walk_start(&iter->hti);

static void netlink_walk_stop(struct nl_seq_iter *iter)
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);

static void *__netlink_seq_next(struct seq_file *seq)
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

		nlk = rhashtable_walk_next(&iter->hti);
			if (PTR_ERR(nlk) == -EAGAIN)

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)

			netlink_walk_start(iter);
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;

	netlink_walk_start(iter);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return __netlink_seq_next(seq);

static void netlink_seq_stop(struct seq_file *seq, void *v)
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)

	netlink_walk_stop(iter);

static int netlink_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN) {
			 "sk Eth Pid Groups "
			 "Rmem Wmem Dump Locks Drops Inode\n");
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   READ_ONCE(nlk->cb_running),
			   refcount_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),

static const struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next  = netlink_seq_next,
	.stop  = netlink_seq_stop,
	.show  = netlink_seq_show,
};

int netlink_register_notifier(struct notifier_block *nb)
	return blocking_notifier_chain_register(&netlink_chain, nb);
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
	return blocking_notifier_chain_unregister(&netlink_chain, nb);
EXPORT_SYMBOL(netlink_unregister_notifier);

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	netlink_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
#ifdef CONFIG_PROC_FS
	if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops,
			     sizeof(struct nl_seq_iter)))

static void __net_exit netlink_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);

static void __init netlink_add_usersock_entry(void)
	struct listeners *listeners;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};

static int __init netlink_proto_init(void)
	int err = proto_register(&netlink_proto, 0);

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
				rhashtable_destroy(&nl_table[i].hash);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	register_pernet_subsys(&netlink_tap_net_ops);
	/* The netlink device handler may be needed early. */

	panic("netlink_init: Cannot allocate nl_table\n");

core_initcall(netlink_proto_init);