1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET4: Implementation of BSD Unix domain sockets.
5 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
8 * Linus Torvalds : Assorted bug cures.
9 * Niibe Yutaka : async I/O support.
10 * Carsten Paeth : PF_UNIX check, address fixes.
11 * Alan Cox : Limit size of allocated blocks.
12 * Alan Cox : Fixed the stupid socketpair bug.
13 * Alan Cox : BSD compatibility fine tuning.
14 * Alan Cox : Fixed a bug in connect when interrupted.
15 * Alan Cox : Sorted out a proper draft version of
16 * file descriptor passing hacked up from
18 * Marty Leisner : Fixes to fd passing
19 * Nick Nevin : recvmsg bugfix.
20 * Alan Cox : Started proper garbage collector
21 * Heiko EiBfeldt : Missing verify_area check
22 * Alan Cox : Started POSIXisms
23 * Andreas Schwab : Replace inode by dentry for proper
25 * Kirk Petersen : Made this a module
26 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
28 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
29 * by the above two patches.
30 * Andrea Arcangeli : If possible we block in connect(2)
31 * if the max backlog of the listen socket
32 * has been reached. This won't break
33 * old apps and it avoids a huge amount
34 * of socks being hashed (this is for
35 * unix_gc() performance reasons).
36 * Security fix that limits the max
37 * number of socks to 2*max_files and
38 * the number of skb queueable in the
39 * receiving queue.
40 * Artur Skawina : Hash function optimizations
41 * Alexey Kuznetsov : Full scale SMP. A lot of bugs are introduced 8)
42 * Malcolm Beattie : Set peercred for socketpair
43 * Michal Ostrowski : Module initialization cleanup.
44 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
45 * the core infrastructure is doing that
46 * for all net proto families now (2.5.69+)
48 * Known differences from reference BSD that was tested:
51 * ECONNREFUSED is not returned from one end of a connected() socket to the
52 * other the moment one end closes.
53 * fstat() doesn't return st_dev=0; it gives the blksize as the high water mark
54 * and a fake inode identifier (nor does it have the BSD first-socket fstat-twice bug).
56 * accept() returns a path name even if the connecting socket has closed
57 * in the meantime (BSD loses the path and gives up).
58 * accept() returns a 0-length path for an unbound connector. BSD returns 16
59 * and a null first byte in the path (but not for gethost/peername - a BSD bug??)
60 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
61 * BSD af_unix apparently has connect forgetting to block properly.
62 * (need to check this with the POSIX spec in detail)
64 * Differences from 2.0.0-11-... (ANK)
65 * Bug fixes and improvements.
66 * - client shutdown killed server socket.
67 * - removed all useless cli/sti pairs.
69 * Semantic changes/extensions.
70 * - generic control message passing.
71 * - SCM_CREDENTIALS control message.
72 * - "Abstract" (not FS based) socket bindings.
73 * Abstract names are sequences of bytes (not zero terminated)
74 * starting with 0, so that this name space does not intersect
75 * with BSD names.
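*
* For illustration, a minimal userspace sketch (not part of this file;
* the name "\0example" is illustrative) of binding in the abstract
* namespace: the address starts with a NUL byte, and the passed length
* covers exactly the bytes of the name:
*
*	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
*	struct sockaddr_un sun = { .sun_family = AF_UNIX };
*
*	memcpy(sun.sun_path, "\0example", 8);
*	bind(fd, (struct sockaddr *)&sun,
*	     offsetof(struct sockaddr_un, sun_path) + 8);
*/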
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/freezer.h>
116 #include <linux/file.h>
117 #include <linux/btf_ids.h>
121 static atomic_long_t unix_nr_socks;
122 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
123 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
125 /* SMP locking strategy:
126 * the hash table is protected with a spinlock.
127 * each socket's state is protected by a separate spinlock.
128 */
130 static unsigned int unix_unbound_hash(struct sock *sk)
132 unsigned long hash = (unsigned long)sk;
138 return hash & UNIX_HASH_MOD;
141 static unsigned int unix_bsd_hash(struct inode *i)
143 return i->i_ino & UNIX_HASH_MOD;
146 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
147 int addr_len, int type)
149 __wsum csum = csum_partial(sunaddr, addr_len, 0);
152 hash = (__force unsigned int)csum_fold(csum);
156 return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
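/* The three hash helpers above partition the table: unbound and
* pathname (BSD) sockets hash into [0, UNIX_HASH_MOD], while abstract
* sockets land in [UNIX_HASH_MOD + 1, UNIX_HASH_SIZE - 1], so the two
* kinds never share a bucket.
*/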
159 static void unix_table_double_lock(struct net *net,
160 unsigned int hash1, unsigned int hash2)
162 if (hash1 == hash2) {
163 spin_lock(&net->unx.table.locks[hash1]);
170 spin_lock(&net->unx.table.locks[hash1]);
171 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
174 static void unix_table_double_unlock(struct net *net,
175 unsigned int hash1, unsigned int hash2)
177 if (hash1 == hash2) {
178 spin_unlock(&net->unx.table.locks[hash1]);
182 spin_unlock(&net->unx.table.locks[hash1]);
183 spin_unlock(&net->unx.table.locks[hash2]);
186 #ifdef CONFIG_SECURITY_NETWORK
187 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
189 UNIXCB(skb).secid = scm->secid;
192 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
194 scm->secid = UNIXCB(skb).secid;
197 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
199 return (scm->secid == UNIXCB(skb).secid);
202 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
205 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
208 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
212 #endif /* CONFIG_SECURITY_NETWORK */
214 #define unix_peer(sk) (unix_sk(sk)->peer)
216 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
218 return unix_peer(osk) == sk;
221 static inline int unix_may_send(struct sock *sk, struct sock *osk)
223 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
226 static inline int unix_recvq_full(const struct sock *sk)
228 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
231 static inline int unix_recvq_full_lockless(const struct sock *sk)
233 return skb_queue_len_lockless(&sk->sk_receive_queue) >
234 READ_ONCE(sk->sk_max_ack_backlog);
237 struct sock *unix_peer_get(struct sock *s)
245 unix_state_unlock(s);
248 EXPORT_SYMBOL_GPL(unix_peer_get);
250 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
253 struct unix_address *addr;
255 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
259 refcount_set(&addr->refcnt, 1);
260 addr->len = addr_len;
261 memcpy(addr->name, sunaddr, addr_len);
266 static inline void unix_release_addr(struct unix_address *addr)
268 if (refcount_dec_and_test(&addr->refcnt))
273 * Check unix socket name:
274 * - should not be zero length.
275 * - if it does not start with zero, it should be NUL terminated (FS object)
276 * - if it starts with zero, it is an abstract name.
277 */
279 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
281 if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
282 addr_len > sizeof(*sunaddr))
285 if (sunaddr->sun_family != AF_UNIX)
291 static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
293 /* This may look like an off-by-one error but it is a bit more
294 * subtle. 108 is the longest valid AF_UNIX path for a binding.
295 * sun_path[108] doesn't as such exist. However, in kernel space
296 * we are guaranteed that it is a valid memory location in our
297 * kernel address buffer because syscall functions always pass
298 * a pointer to a struct sockaddr_storage, which has a bigger buffer
299 * than required.
300 */
301 ((char *)sunaddr)[addr_len] = 0;
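/* For illustration (userspace sketch, assuming the usual UAPI/glibc
* definitions), the headroom relied on above is visible in the type
* sizes themselves:
*
*	printf("%zu\n", sizeof(struct sockaddr_un));      // typically 110
*	printf("%zu\n", sizeof(struct sockaddr_storage)); // typically 128
*
* so storing one byte at index addr_len (at most 110) stays inside the
* sockaddr_storage buffer the syscall layer actually handed us.
*/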
304 static void __unix_remove_socket(struct sock *sk)
306 sk_del_node_init(sk);
309 static void __unix_insert_socket(struct net *net, struct sock *sk)
311 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
312 sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
315 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
316 struct unix_address *addr, unsigned int hash)
318 __unix_remove_socket(sk);
319 smp_store_release(&unix_sk(sk)->addr, addr);
322 __unix_insert_socket(net, sk);
325 static void unix_remove_socket(struct net *net, struct sock *sk)
327 spin_lock(&net->unx.table.locks[sk->sk_hash]);
328 __unix_remove_socket(sk);
329 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
332 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
334 spin_lock(&net->unx.table.locks[sk->sk_hash]);
335 __unix_insert_socket(net, sk);
336 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
339 static void unix_insert_bsd_socket(struct sock *sk)
341 spin_lock(&bsd_socket_locks[sk->sk_hash]);
342 sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
343 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
346 static void unix_remove_bsd_socket(struct sock *sk)
348 if (!hlist_unhashed(&sk->sk_bind_node)) {
349 spin_lock(&bsd_socket_locks[sk->sk_hash]);
350 __sk_del_bind_node(sk);
351 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
353 sk_node_init(&sk->sk_bind_node);
357 static struct sock *__unix_find_socket_byname(struct net *net,
358 struct sockaddr_un *sunname,
359 int len, unsigned int hash)
363 sk_for_each(s, &net->unx.table.buckets[hash]) {
364 struct unix_sock *u = unix_sk(s);
366 if (u->addr->len == len &&
367 !memcmp(u->addr->name, sunname, len))
373 static inline struct sock *unix_find_socket_byname(struct net *net,
374 struct sockaddr_un *sunname,
375 int len, unsigned int hash)
379 spin_lock(&net->unx.table.locks[hash]);
380 s = __unix_find_socket_byname(net, sunname, len, hash);
383 spin_unlock(&net->unx.table.locks[hash]);
387 static struct sock *unix_find_socket_byinode(struct inode *i)
389 unsigned int hash = unix_bsd_hash(i);
392 spin_lock(&bsd_socket_locks[hash]);
393 sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
394 struct dentry *dentry = unix_sk(s)->path.dentry;
396 if (dentry && d_backing_inode(dentry) == i) {
398 spin_unlock(&bsd_socket_locks[hash]);
402 spin_unlock(&bsd_socket_locks[hash]);
406 /* Support code for asymmetrically connected dgram sockets
408 * If a datagram socket is connected to a socket not itself connected
409 * to the first socket (e.g., /dev/log), clients may only enqueue more
410 * messages if the present receive queue of the server socket is not
411 * "too large". This means there's a second writeability condition
412 * poll and sendmsg need to test. The dgram recv code will do a wake
413 * up on the peer_wait wait queue of a socket upon reception of a
414 * datagram which needs to be propagated to sleeping would-be writers
415 * since these might not have sent anything so far. This can't be
416 * accomplished via poll_wait because the lifetime of the server
417 * socket might be less than that of its clients if these break their
418 * association with it or if the server socket is closed while clients
419 * are still connected to it and there's no way to inform "a polling
420 * implementation" that it should let go of a certain wait queue
422 * In order to propagate a wake up, a wait_queue_entry_t of the client
423 * socket is enqueued on the peer_wait queue of the server socket
424 * whose wake function does a wake_up on the ordinary client socket
425 * wait queue. This connection is established whenever a write (or
426 * poll for write) hits the flow control condition, and is broken when the
427 * association to the server socket is dissolved or after a wake up
428 * was relayed.
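*
* A minimal userspace sketch of the asymmetric case served here (the
* path is illustrative): many clients connect() to one bound dgram
* server that never connects back, so all of them depend on the same
* server receive queue for writeability. The final send() below simply
* lands on the server's queue:
*
*	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
*	struct sockaddr_un sun = { .sun_family = AF_UNIX };
*
*	strcpy(sun.sun_path, "/run/example-log.sock");
*	connect(fd, (struct sockaddr *)&sun, sizeof(sun));
*	send(fd, "hello", 5, 0);
*/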
431 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
435 wait_queue_head_t *u_sleep;
437 u = container_of(q, struct unix_sock, peer_wake);
439 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
441 u->peer_wake.private = NULL;
443 /* relaying can only happen while the wq still exists */
444 u_sleep = sk_sleep(&u->sk);
446 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
451 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
453 struct unix_sock *u, *u_other;
457 u_other = unix_sk(other);
459 spin_lock(&u_other->peer_wait.lock);
461 if (!u->peer_wake.private) {
462 u->peer_wake.private = other;
463 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
468 spin_unlock(&u_other->peer_wait.lock);
472 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
475 struct unix_sock *u, *u_other;
478 u_other = unix_sk(other);
479 spin_lock(&u_other->peer_wait.lock);
481 if (u->peer_wake.private == other) {
482 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
483 u->peer_wake.private = NULL;
486 spin_unlock(&u_other->peer_wait.lock);
489 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
492 unix_dgram_peer_wake_disconnect(sk, other);
493 wake_up_interruptible_poll(sk_sleep(sk),
500 * - unix_peer(sk) == other
501 * - association is stable
503 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
507 connected = unix_dgram_peer_wake_connect(sk, other);
509 /* If other is SOCK_DEAD, we want to make sure we signal
510 * POLLOUT, such that a subsequent write() can get a
511 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
512 * to other and it's full, we will hang waiting for POLLOUT.
514 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
518 unix_dgram_peer_wake_disconnect(sk, other);
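/* A socket counts as writable while it is not listening and its
* outstanding write memory is at most a quarter of sk_sndbuf:
* (wmem_alloc << 2) <= sk_sndbuf below is wmem_alloc <= sk_sndbuf / 4
* without a division.
*/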
523 static int unix_writable(const struct sock *sk)
525 return sk->sk_state != TCP_LISTEN &&
526 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
529 static void unix_write_space(struct sock *sk)
531 struct socket_wq *wq;
534 if (unix_writable(sk)) {
535 wq = rcu_dereference(sk->sk_wq);
536 if (skwq_has_sleeper(wq))
537 wake_up_interruptible_sync_poll(&wq->wait,
538 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
539 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
544 /* When a dgram socket disconnects (or changes its peer), we clear its receive
545 * queue of packets that arrived from the previous peer. First, this allows us
546 * to do flow control based only on wmem_alloc; second, an sk connected to a
547 * peer may receive messages only from that peer. */
548 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
550 if (!skb_queue_empty(&sk->sk_receive_queue)) {
551 skb_queue_purge(&sk->sk_receive_queue);
552 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
554 /* If one link of a bidirectional dgram pipe is disconnected,
555 * we signal an error. Messages are lost. Do not do this
556 * when the peer was not connected to us.
557 */
558 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
559 other->sk_err = ECONNRESET;
560 sk_error_report(other);
563 other->sk_state = TCP_CLOSE;
566 static void unix_sock_destructor(struct sock *sk)
568 struct unix_sock *u = unix_sk(sk);
570 skb_queue_purge(&sk->sk_receive_queue);
572 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
573 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
574 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
575 if (!sock_flag(sk, SOCK_DEAD)) {
576 pr_info("Attempt to release alive unix socket: %p\n", sk);
581 unix_release_addr(u->addr);
583 atomic_long_dec(&unix_nr_socks);
584 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
585 #ifdef UNIX_REFCNT_DEBUG
586 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
587 atomic_long_read(&unix_nr_socks));
591 static void unix_release_sock(struct sock *sk, int embrion)
593 struct unix_sock *u = unix_sk(sk);
599 unix_remove_socket(sock_net(sk), sk);
600 unix_remove_bsd_socket(sk);
605 sk->sk_shutdown = SHUTDOWN_MASK;
607 u->path.dentry = NULL;
609 state = sk->sk_state;
610 sk->sk_state = TCP_CLOSE;
612 skpair = unix_peer(sk);
613 unix_peer(sk) = NULL;
615 unix_state_unlock(sk);
617 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
619 kfree_skb(u->oob_skb);
624 wake_up_interruptible_all(&u->peer_wait);
626 if (skpair != NULL) {
627 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
628 unix_state_lock(skpair);
630 skpair->sk_shutdown = SHUTDOWN_MASK;
631 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
632 skpair->sk_err = ECONNRESET;
633 unix_state_unlock(skpair);
634 skpair->sk_state_change(skpair);
635 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
638 unix_dgram_peer_wake_disconnect(sk, skpair);
639 sock_put(skpair); /* It may now die */
642 /* Try to flush out this socket. Throw out buffers at least */
644 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
645 if (state == TCP_LISTEN)
646 unix_release_sock(skb->sk, 1);
647 /* passed fds are erased in the kfree_skb hook */
648 UNIXCB(skb).consumed = skb->len;
657 /* ---- Socket is dead now and most probably destroyed ---- */
660 * Fixme: BSD difference: In BSD all sockets connected to us get
661 * ECONNRESET and we die on the spot. In Linux we behave
662 * like files and pipes do and wait for the last
663 * dereference.
665 * Can't we simply set sock->err?
667 * What is the above comment talking about? --ANK(980817)
668 */
670 if (unix_tot_inflight)
671 unix_gc(); /* Garbage collect fds */
674 static void init_peercred(struct sock *sk)
676 const struct cred *old_cred;
679 spin_lock(&sk->sk_peer_lock);
680 old_pid = sk->sk_peer_pid;
681 old_cred = sk->sk_peer_cred;
682 sk->sk_peer_pid = get_pid(task_tgid(current));
683 sk->sk_peer_cred = get_current_cred();
684 spin_unlock(&sk->sk_peer_lock);
690 static void copy_peercred(struct sock *sk, struct sock *peersk)
692 const struct cred *old_cred;
696 spin_lock(&sk->sk_peer_lock);
697 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
699 spin_lock(&peersk->sk_peer_lock);
700 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
702 old_pid = sk->sk_peer_pid;
703 old_cred = sk->sk_peer_cred;
704 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
705 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
707 spin_unlock(&sk->sk_peer_lock);
708 spin_unlock(&peersk->sk_peer_lock);
714 static int unix_listen(struct socket *sock, int backlog)
717 struct sock *sk = sock->sk;
718 struct unix_sock *u = unix_sk(sk);
721 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
722 goto out; /* Only stream/seqpacket sockets accept */
725 goto out; /* No listens on an unbound socket */
727 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
729 if (backlog > sk->sk_max_ack_backlog)
730 wake_up_interruptible_all(&u->peer_wait);
731 sk->sk_max_ack_backlog = backlog;
732 sk->sk_state = TCP_LISTEN;
733 /* set credentials so connect can copy them */
738 unix_state_unlock(sk);
743 static int unix_release(struct socket *);
744 static int unix_bind(struct socket *, struct sockaddr *, int);
745 static int unix_stream_connect(struct socket *, struct sockaddr *,
746 int addr_len, int flags);
747 static int unix_socketpair(struct socket *, struct socket *);
748 static int unix_accept(struct socket *, struct socket *, int, bool);
749 static int unix_getname(struct socket *, struct sockaddr *, int);
750 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
751 static __poll_t unix_dgram_poll(struct file *, struct socket *,
753 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
755 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
757 static int unix_shutdown(struct socket *, int);
758 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
759 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
760 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
761 size_t size, int flags);
762 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
763 struct pipe_inode_info *, size_t size,
765 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
766 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
767 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
768 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
769 static int unix_dgram_connect(struct socket *, struct sockaddr *,
771 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
772 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
775 static int unix_set_peek_off(struct sock *sk, int val)
777 struct unix_sock *u = unix_sk(sk);
779 if (mutex_lock_interruptible(&u->iolock))
782 sk->sk_peek_off = val;
783 mutex_unlock(&u->iolock);
788 #ifdef CONFIG_PROC_FS
789 static int unix_count_nr_fds(struct sock *sk)
795 spin_lock(&sk->sk_receive_queue.lock);
796 skb = skb_peek(&sk->sk_receive_queue);
798 u = unix_sk(skb->sk);
799 nr_fds += atomic_read(&u->scm_stat.nr_fds);
800 skb = skb_peek_next(skb, &sk->sk_receive_queue);
802 spin_unlock(&sk->sk_receive_queue.lock);
807 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
809 struct sock *sk = sock->sk;
815 if (sock->type == SOCK_DGRAM) {
816 nr_fds = atomic_read(&u->scm_stat.nr_fds);
821 if (sk->sk_state != TCP_LISTEN)
822 nr_fds = atomic_read(&u->scm_stat.nr_fds);
824 nr_fds = unix_count_nr_fds(sk);
825 unix_state_unlock(sk);
827 seq_printf(m, "scm_fds: %u\n", nr_fds);
831 #define unix_show_fdinfo NULL
834 static const struct proto_ops unix_stream_ops = {
836 .owner = THIS_MODULE,
837 .release = unix_release,
839 .connect = unix_stream_connect,
840 .socketpair = unix_socketpair,
841 .accept = unix_accept,
842 .getname = unix_getname,
846 .compat_ioctl = unix_compat_ioctl,
848 .listen = unix_listen,
849 .shutdown = unix_shutdown,
850 .sendmsg = unix_stream_sendmsg,
851 .recvmsg = unix_stream_recvmsg,
852 .read_skb = unix_stream_read_skb,
853 .mmap = sock_no_mmap,
854 .sendpage = unix_stream_sendpage,
855 .splice_read = unix_stream_splice_read,
856 .set_peek_off = unix_set_peek_off,
857 .show_fdinfo = unix_show_fdinfo,
860 static const struct proto_ops unix_dgram_ops = {
862 .owner = THIS_MODULE,
863 .release = unix_release,
865 .connect = unix_dgram_connect,
866 .socketpair = unix_socketpair,
867 .accept = sock_no_accept,
868 .getname = unix_getname,
869 .poll = unix_dgram_poll,
872 .compat_ioctl = unix_compat_ioctl,
874 .listen = sock_no_listen,
875 .shutdown = unix_shutdown,
876 .sendmsg = unix_dgram_sendmsg,
877 .read_skb = unix_read_skb,
878 .recvmsg = unix_dgram_recvmsg,
879 .mmap = sock_no_mmap,
880 .sendpage = sock_no_sendpage,
881 .set_peek_off = unix_set_peek_off,
882 .show_fdinfo = unix_show_fdinfo,
885 static const struct proto_ops unix_seqpacket_ops = {
887 .owner = THIS_MODULE,
888 .release = unix_release,
890 .connect = unix_stream_connect,
891 .socketpair = unix_socketpair,
892 .accept = unix_accept,
893 .getname = unix_getname,
894 .poll = unix_dgram_poll,
897 .compat_ioctl = unix_compat_ioctl,
899 .listen = unix_listen,
900 .shutdown = unix_shutdown,
901 .sendmsg = unix_seqpacket_sendmsg,
902 .recvmsg = unix_seqpacket_recvmsg,
903 .mmap = sock_no_mmap,
904 .sendpage = sock_no_sendpage,
905 .set_peek_off = unix_set_peek_off,
906 .show_fdinfo = unix_show_fdinfo,
909 static void unix_close(struct sock *sk, long timeout)
911 /* Nothing to do here, unix socket does not need a ->close().
912 * This is merely for sockmap.
916 static void unix_unhash(struct sock *sk)
918 /* Nothing to do here, unix socket does not need a ->unhash().
919 * This is merely for sockmap.
923 struct proto unix_dgram_proto = {
925 .owner = THIS_MODULE,
926 .obj_size = sizeof(struct unix_sock),
928 #ifdef CONFIG_BPF_SYSCALL
929 .psock_update_sk_prot = unix_dgram_bpf_update_proto,
933 struct proto unix_stream_proto = {
934 .name = "UNIX-STREAM",
935 .owner = THIS_MODULE,
936 .obj_size = sizeof(struct unix_sock),
938 .unhash = unix_unhash,
939 #ifdef CONFIG_BPF_SYSCALL
940 .psock_update_sk_prot = unix_stream_bpf_update_proto,
944 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
950 atomic_long_inc(&unix_nr_socks);
951 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
956 if (type == SOCK_STREAM)
957 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
958 else /*dgram and seqpacket */
959 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
966 sock_init_data(sock, sk);
968 sk->sk_hash = unix_unbound_hash(sk);
969 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
970 sk->sk_write_space = unix_write_space;
971 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
972 sk->sk_destruct = unix_sock_destructor;
974 u->path.dentry = NULL;
976 spin_lock_init(&u->lock);
977 atomic_long_set(&u->inflight, 0);
978 INIT_LIST_HEAD(&u->link);
979 mutex_init(&u->iolock); /* single task reading lock */
980 mutex_init(&u->bindlock); /* single task binding lock */
981 init_waitqueue_head(&u->peer_wait);
982 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
983 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
984 unix_insert_unbound_socket(net, sk);
986 sock_prot_inuse_add(net, sk->sk_prot, 1);
991 atomic_long_dec(&unix_nr_socks);
995 static int unix_create(struct net *net, struct socket *sock, int protocol,
1000 if (protocol && protocol != PF_UNIX)
1001 return -EPROTONOSUPPORT;
1003 sock->state = SS_UNCONNECTED;
1005 switch (sock->type) {
1007 sock->ops = &unix_stream_ops;
1010 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
1014 sock->type = SOCK_DGRAM;
1017 sock->ops = &unix_dgram_ops;
1019 case SOCK_SEQPACKET:
1020 sock->ops = &unix_seqpacket_ops;
1023 return -ESOCKTNOSUPPORT;
1026 sk = unix_create1(net, sock, kern, sock->type);
1033 static int unix_release(struct socket *sock)
1035 struct sock *sk = sock->sk;
1040 sk->sk_prot->close(sk, 0);
1041 unix_release_sock(sk, 0);
1047 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1050 struct inode *inode;
1055 unix_mkname_bsd(sunaddr, addr_len);
1056 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1060 err = path_permission(&path, MAY_WRITE);
1064 err = -ECONNREFUSED;
1065 inode = d_backing_inode(path.dentry);
1066 if (!S_ISSOCK(inode->i_mode))
1069 sk = unix_find_socket_byinode(inode);
1074 if (sk->sk_type == type)
1088 return ERR_PTR(err);
1091 static struct sock *unix_find_abstract(struct net *net,
1092 struct sockaddr_un *sunaddr,
1093 int addr_len, int type)
1095 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1096 struct dentry *dentry;
1099 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1101 return ERR_PTR(-ECONNREFUSED);
1103 dentry = unix_sk(sk)->path.dentry;
1105 touch_atime(&unix_sk(sk)->path);
1110 static struct sock *unix_find_other(struct net *net,
1111 struct sockaddr_un *sunaddr,
1112 int addr_len, int type)
1116 if (sunaddr->sun_path[0])
1117 sk = unix_find_bsd(sunaddr, addr_len, type);
1119 sk = unix_find_abstract(net, sunaddr, addr_len, type);
1124 static int unix_autobind(struct sock *sk)
1126 unsigned int new_hash, old_hash = sk->sk_hash;
1127 struct unix_sock *u = unix_sk(sk);
1128 struct net *net = sock_net(sk);
1129 struct unix_address *addr;
1130 u32 lastnum, ordernum;
1133 err = mutex_lock_interruptible(&u->bindlock);
1141 addr = kzalloc(sizeof(*addr) +
1142 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1146 addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1147 addr->name->sun_family = AF_UNIX;
1148 refcount_set(&addr->refcnt, 1);
1150 ordernum = get_random_u32();
1151 lastnum = ordernum & 0xFFFFF;
1153 ordernum = (ordernum + 1) & 0xFFFFF;
1154 sprintf(addr->name->sun_path + 1, "%05x", ordernum);
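/* The autobound name is abstract: a leading NUL followed by five hex
* digits, matching the addr->len of
* offsetof(struct sockaddr_un, sun_path) + 6 set above.
*/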
1156 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1157 unix_table_double_lock(net, old_hash, new_hash);
1159 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1160 unix_table_double_unlock(net, old_hash, new_hash);
1162 /* __unix_find_socket_byname() may take a long time if many names
1163 * are already in use.
1167 if (ordernum == lastnum) {
1168 /* Give up if all names seem to be in use. */
1170 unix_release_addr(addr);
1177 __unix_set_addr_hash(net, sk, addr, new_hash);
1178 unix_table_double_unlock(net, old_hash, new_hash);
1181 out: mutex_unlock(&u->bindlock);
1185 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1188 umode_t mode = S_IFSOCK |
1189 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1190 unsigned int new_hash, old_hash = sk->sk_hash;
1191 struct unix_sock *u = unix_sk(sk);
1192 struct net *net = sock_net(sk);
1193 struct user_namespace *ns; // barf...
1194 struct unix_address *addr;
1195 struct dentry *dentry;
1199 unix_mkname_bsd(sunaddr, addr_len);
1200 addr_len = strlen(sunaddr->sun_path) +
1201 offsetof(struct sockaddr_un, sun_path) + 1;
1203 addr = unix_create_addr(sunaddr, addr_len);
1208 * Get the parent directory, calculate the hash for the last
1209 * component.
1211 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1212 if (IS_ERR(dentry)) {
1213 err = PTR_ERR(dentry);
1218 * All right, let's create it.
1220 ns = mnt_user_ns(parent.mnt);
1221 err = security_path_mknod(&parent, dentry, mode, 0);
1223 err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
1226 err = mutex_lock_interruptible(&u->bindlock);
1232 new_hash = unix_bsd_hash(d_backing_inode(dentry));
1233 unix_table_double_lock(net, old_hash, new_hash);
1234 u->path.mnt = mntget(parent.mnt);
1235 u->path.dentry = dget(dentry);
1236 __unix_set_addr_hash(net, sk, addr, new_hash);
1237 unix_table_double_unlock(net, old_hash, new_hash);
1238 unix_insert_bsd_socket(sk);
1239 mutex_unlock(&u->bindlock);
1240 done_path_create(&parent, dentry);
1244 mutex_unlock(&u->bindlock);
1247 /* failed after successful mknod? unlink what we'd created... */
1248 vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
1250 done_path_create(&parent, dentry);
1252 unix_release_addr(addr);
1253 return err == -EEXIST ? -EADDRINUSE : err;
1256 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1259 unsigned int new_hash, old_hash = sk->sk_hash;
1260 struct unix_sock *u = unix_sk(sk);
1261 struct net *net = sock_net(sk);
1262 struct unix_address *addr;
1265 addr = unix_create_addr(sunaddr, addr_len);
1269 err = mutex_lock_interruptible(&u->bindlock);
1278 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1279 unix_table_double_lock(net, old_hash, new_hash);
1281 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1284 __unix_set_addr_hash(net, sk, addr, new_hash);
1285 unix_table_double_unlock(net, old_hash, new_hash);
1286 mutex_unlock(&u->bindlock);
1290 unix_table_double_unlock(net, old_hash, new_hash);
1293 mutex_unlock(&u->bindlock);
1295 unix_release_addr(addr);
1299 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1301 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1302 struct sock *sk = sock->sk;
1305 if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1306 sunaddr->sun_family == AF_UNIX)
1307 return unix_autobind(sk);
1309 err = unix_validate_addr(sunaddr, addr_len);
1313 if (sunaddr->sun_path[0])
1314 err = unix_bind_bsd(sk, sunaddr, addr_len);
1316 err = unix_bind_abstract(sk, sunaddr, addr_len);
1321 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1323 if (unlikely(sk1 == sk2) || !sk2) {
1324 unix_state_lock(sk1);
1328 unix_state_lock(sk1);
1329 unix_state_lock_nested(sk2);
1331 unix_state_lock(sk2);
1332 unix_state_lock_nested(sk1);
1336 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1338 if (unlikely(sk1 == sk2) || !sk2) {
1339 unix_state_unlock(sk1);
1342 unix_state_unlock(sk1);
1343 unix_state_unlock(sk2);
1346 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1347 int alen, int flags)
1349 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1350 struct sock *sk = sock->sk;
1355 if (alen < offsetofend(struct sockaddr, sa_family))
1358 if (addr->sa_family != AF_UNSPEC) {
1359 err = unix_validate_addr(sunaddr, alen);
1363 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1364 !unix_sk(sk)->addr) {
1365 err = unix_autobind(sk);
1371 other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1372 if (IS_ERR(other)) {
1373 err = PTR_ERR(other);
1377 unix_state_double_lock(sk, other);
1379 /* Apparently VFS overslept socket death. Retry. */
1380 if (sock_flag(other, SOCK_DEAD)) {
1381 unix_state_double_unlock(sk, other);
1387 if (!unix_may_send(sk, other))
1390 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1394 sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1397 * 1003.1g breaking connected state with AF_UNSPEC
1400 unix_state_double_lock(sk, other);
1404 * If it was connected, reconnect.
1406 if (unix_peer(sk)) {
1407 struct sock *old_peer = unix_peer(sk);
1409 unix_peer(sk) = other;
1411 sk->sk_state = TCP_CLOSE;
1412 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1414 unix_state_double_unlock(sk, other);
1416 if (other != old_peer)
1417 unix_dgram_disconnected(sk, old_peer);
1420 unix_peer(sk) = other;
1421 unix_state_double_unlock(sk, other);
1427 unix_state_double_unlock(sk, other);
1433 static long unix_wait_for_peer(struct sock *other, long timeo)
1434 __releases(&unix_sk(other)->lock)
1436 struct unix_sock *u = unix_sk(other);
1440 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1442 sched = !sock_flag(other, SOCK_DEAD) &&
1443 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1444 unix_recvq_full(other);
1446 unix_state_unlock(other);
1449 timeo = schedule_timeout(timeo);
1451 finish_wait(&u->peer_wait, &wait);
1455 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1456 int addr_len, int flags)
1458 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1459 struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1460 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1461 struct net *net = sock_net(sk);
1462 struct sk_buff *skb = NULL;
1467 err = unix_validate_addr(sunaddr, addr_len);
1471 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1472 err = unix_autobind(sk);
1477 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1479 /* First of all allocate resources.
1480 If we do it after the state is locked,
1481 we will have to recheck everything again in any case.
1482 */
1484 /* create new sock for complete connection */
1485 newsk = unix_create1(net, NULL, 0, sock->type);
1486 if (IS_ERR(newsk)) {
1487 err = PTR_ERR(newsk);
1494 /* Allocate skb for sending to listening sock */
1495 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1500 /* Find listening sock. */
1501 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1502 if (IS_ERR(other)) {
1503 err = PTR_ERR(other);
1508 /* Latch state of peer */
1509 unix_state_lock(other);
1511 /* Apparently VFS overslept socket death. Retry. */
1512 if (sock_flag(other, SOCK_DEAD)) {
1513 unix_state_unlock(other);
1518 err = -ECONNREFUSED;
1519 if (other->sk_state != TCP_LISTEN)
1521 if (other->sk_shutdown & RCV_SHUTDOWN)
1524 if (unix_recvq_full(other)) {
1529 timeo = unix_wait_for_peer(other, timeo);
1531 err = sock_intr_errno(timeo);
1532 if (signal_pending(current))
1540 It is a tricky place. We need to grab our state lock and cannot
1541 drop the lock on the peer. It is dangerous because a deadlock is
1542 possible. The connect-to-self case and simultaneous
1543 attempts to connect are eliminated by checking the socket
1544 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1545 check this before attempting to grab the lock.
1547 Well, and we have to recheck the state after the socket is locked.
1548 */
1553 /* This is ok... continue with connect */
1555 case TCP_ESTABLISHED:
1556 /* Socket is already connected */
1564 unix_state_lock_nested(sk);
1566 if (sk->sk_state != st) {
1567 unix_state_unlock(sk);
1568 unix_state_unlock(other);
1573 err = security_unix_stream_connect(sk, other, newsk);
1575 unix_state_unlock(sk);
1579 /* The way is open! Quickly set all the necessary fields... */
1582 unix_peer(newsk) = sk;
1583 newsk->sk_state = TCP_ESTABLISHED;
1584 newsk->sk_type = sk->sk_type;
1585 init_peercred(newsk);
1586 newu = unix_sk(newsk);
1587 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1588 otheru = unix_sk(other);
1590 /* copy address information from listening to new sock
1592 * The contents of *(otheru->addr) and otheru->path
1593 * are seen fully set up here, since we have found
1594 * otheru in hash under its lock. Insertion into the
1595 * hash chain we'd found it in had been done in an
1596 * earlier critical area protected by the chain's lock,
1597 * the same one where we'd set *(otheru->addr) contents,
1598 * as well as otheru->path and otheru->addr itself.
1600 * Using smp_store_release() here to set newu->addr
1601 * is enough to make those stores, as well as stores
1602 * to newu->path visible to anyone who gets newu->addr
1603 * by smp_load_acquire(). IOW, the same guarantees
1604 * as for unix_sock instances bound in unix_bind() or
1605 * in unix_autobind().
1607 if (otheru->path.dentry) {
1608 path_get(&otheru->path);
1609 newu->path = otheru->path;
1611 refcount_inc(&otheru->addr->refcnt);
1612 smp_store_release(&newu->addr, otheru->addr);
1614 /* Set credentials */
1615 copy_peercred(sk, other);
1617 sock->state = SS_CONNECTED;
1618 sk->sk_state = TCP_ESTABLISHED;
1621 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1622 unix_peer(sk) = newsk;
1624 unix_state_unlock(sk);
1626 /* take ten and send info to listening sock */
1627 spin_lock(&other->sk_receive_queue.lock);
1628 __skb_queue_tail(&other->sk_receive_queue, skb);
1629 spin_unlock(&other->sk_receive_queue.lock);
1630 unix_state_unlock(other);
1631 other->sk_data_ready(other);
1637 unix_state_unlock(other);
1642 unix_release_sock(newsk, 0);
1648 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1650 struct sock *ska = socka->sk, *skb = sockb->sk;
1652 /* Join our sockets back to back */
1655 unix_peer(ska) = skb;
1656 unix_peer(skb) = ska;
1660 ska->sk_state = TCP_ESTABLISHED;
1661 skb->sk_state = TCP_ESTABLISHED;
1662 socka->state = SS_CONNECTED;
1663 sockb->state = SS_CONNECTED;
1667 static void unix_sock_inherit_flags(const struct socket *old,
1670 if (test_bit(SOCK_PASSCRED, &old->flags))
1671 set_bit(SOCK_PASSCRED, &new->flags);
1672 if (test_bit(SOCK_PASSSEC, &old->flags))
1673 set_bit(SOCK_PASSSEC, &new->flags);
1676 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1679 struct sock *sk = sock->sk;
1681 struct sk_buff *skb;
1685 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1689 if (sk->sk_state != TCP_LISTEN)
1692 /* If the socket state is TCP_LISTEN it cannot change (for now...),
1693 * so no locks are necessary.
1694 */
1696 skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1699 /* This means receive shutdown. */
1706 skb_free_datagram(sk, skb);
1707 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1709 /* attach accepted sock to socket */
1710 unix_state_lock(tsk);
1711 newsock->state = SS_CONNECTED;
1712 unix_sock_inherit_flags(sock, newsock);
1713 sock_graft(tsk, newsock);
1714 unix_state_unlock(tsk);
1722 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1724 struct sock *sk = sock->sk;
1725 struct unix_address *addr;
1726 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1730 sk = unix_peer_get(sk);
1740 addr = smp_load_acquire(&unix_sk(sk)->addr);
1742 sunaddr->sun_family = AF_UNIX;
1743 sunaddr->sun_path[0] = 0;
1744 err = offsetof(struct sockaddr_un, sun_path);
1747 memcpy(sunaddr, addr->name, addr->len);
1754 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1756 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1759 * Garbage collection of unix sockets starts by selecting a set of
1760 * candidate sockets which have reference only from being in flight
1761 * (total_refs == inflight_refs). This condition is checked once during
1762 * the candidate collection phase, and candidates are marked as such, so
1763 * that non-candidates can later be ignored. While inflight_refs is
1764 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1765 * is an instantaneous decision.
1767 * Once a candidate, however, the socket must not be reinstalled into a
1768 * file descriptor while the garbage collection is in progress.
1770 * If the above conditions are met, then the directed graph of
1771 * candidates (*) does not change while unix_gc_lock is held.
1773 * Any operation that changes the file count through file descriptors
1774 * (dup, close, sendmsg) does not change the graph since candidates are
1775 * not installed in fds.
1777 * Dequeueing a candidate via recvmsg would install it into an fd, but
1778 * that takes unix_gc_lock to decrement the inflight count, so it's
1779 * serialized with garbage collection.
1781 * MSG_PEEK is special in that it does not change the inflight count,
1782 * yet does install the socket into an fd. The following lock/unlock
1783 * pair is to ensure serialization with garbage collection. It must be
1784 * done between incrementing the file count and installing the file into
1787 * If garbage collection starts after the barrier provided by the
1788 * lock/unlock, then it will see the elevated refcount and not mark this
1789 * as a candidate. If a garbage collection is already in progress
1790 * before the file count was incremented, then the lock/unlock pair will
1791 * ensure that garbage collection is finished before progressing to
1792 * installing the fd.
1794 * (*) A -> B where B is on the queue of A or B is on the queue of C
1795 * which is on the queue of listening socket A.
1797 spin_lock(&unix_gc_lock);
1798 spin_unlock(&unix_gc_lock);
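/* For reference, a userspace sketch (not part of this file; the helper
* name is illustrative) of the SCM_RIGHTS fd passing that creates the
* in-flight references unix_gc() has to track:
*
*	static int send_fd(int sock, int fd)
*	{
*		char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
*		char dummy = 0;
*		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
*		struct msghdr msg = {
*			.msg_iov = &iov, .msg_iovlen = 1,
*			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
*		};
*		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
*
*		cmsg->cmsg_level = SOL_SOCKET;
*		cmsg->cmsg_type = SCM_RIGHTS;
*		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
*		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
*		return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
*	}
*/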
1801 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1805 UNIXCB(skb).pid = get_pid(scm->pid);
1806 UNIXCB(skb).uid = scm->creds.uid;
1807 UNIXCB(skb).gid = scm->creds.gid;
1808 UNIXCB(skb).fp = NULL;
1809 unix_get_secdata(scm, skb);
1810 if (scm->fp && send_fds)
1811 err = unix_attach_fds(scm, skb);
1813 skb->destructor = unix_destruct_scm;
1817 static bool unix_passcred_enabled(const struct socket *sock,
1818 const struct sock *other)
1820 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1821 !other->sk_socket ||
1822 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1826 * Some apps rely on write() giving SCM_CREDENTIALS
1827 * We include credentials if source or destination socket
1828 * asserted SOCK_PASSCRED.
1830 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1831 const struct sock *other)
1833 if (UNIXCB(skb).pid)
1835 if (unix_passcred_enabled(sock, other)) {
1836 UNIXCB(skb).pid = get_pid(task_tgid(current));
1837 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
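/* Userspace counterpart, for illustration (struct ucred needs
* _GNU_SOURCE; msg and its control buffer are set up as for any
* recvmsg() call): the receiver opts in with SO_PASSCRED and then
* finds the sender's credentials in an SCM_CREDENTIALS control message:
*
*	int on = 1;
*	struct ucred cred;
*	struct cmsghdr *cmsg;
*
*	setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
*	recvmsg(sock, &msg, 0);
*	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
*		if (cmsg->cmsg_level == SOL_SOCKET &&
*		    cmsg->cmsg_type == SCM_CREDENTIALS)
*			memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
*/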
1841 static int maybe_init_creds(struct scm_cookie *scm,
1842 struct socket *socket,
1843 const struct sock *other)
1846 struct msghdr msg = { .msg_controllen = 0 };
1848 err = scm_send(socket, &msg, scm, false);
1852 if (unix_passcred_enabled(socket, other)) {
1853 scm->pid = get_pid(task_tgid(current));
1854 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1859 static bool unix_skb_scm_eq(struct sk_buff *skb,
1860 struct scm_cookie *scm)
1862 return UNIXCB(skb).pid == scm->pid &&
1863 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1864 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1865 unix_secdata_eq(scm, skb);
1868 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1870 struct scm_fp_list *fp = UNIXCB(skb).fp;
1871 struct unix_sock *u = unix_sk(sk);
1873 if (unlikely(fp && fp->count))
1874 atomic_add(fp->count, &u->scm_stat.nr_fds);
1877 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1879 struct scm_fp_list *fp = UNIXCB(skb).fp;
1880 struct unix_sock *u = unix_sk(sk);
1882 if (unlikely(fp && fp->count))
1883 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1887 * Send AF_UNIX data.
1890 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1893 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1894 struct sock *sk = sock->sk, *other = NULL;
1895 struct unix_sock *u = unix_sk(sk);
1896 struct scm_cookie scm;
1897 struct sk_buff *skb;
1904 err = scm_send(sock, msg, &scm, false);
1909 if (msg->msg_flags&MSG_OOB)
1912 if (msg->msg_namelen) {
1913 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1919 other = unix_peer_get(sk);
1924 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
1925 err = unix_autobind(sk);
1931 if (len > sk->sk_sndbuf - 32)
1934 if (len > SKB_MAX_ALLOC) {
1935 data_len = min_t(size_t,
1936 len - SKB_MAX_ALLOC,
1937 MAX_SKB_FRAGS * PAGE_SIZE);
1938 data_len = PAGE_ALIGN(data_len);
1940 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1943 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1944 msg->msg_flags & MSG_DONTWAIT, &err,
1945 PAGE_ALLOC_COSTLY_ORDER);
1949 err = unix_scm_to_skb(&scm, skb, true);
1953 skb_put(skb, len - data_len);
1954 skb->data_len = data_len;
1956 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1960 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1965 if (sunaddr == NULL)
1968 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1970 if (IS_ERR(other)) {
1971 err = PTR_ERR(other);
1977 if (sk_filter(other, skb) < 0) {
1978 /* Toss the packet but do not return any error to the sender */
1984 unix_state_lock(other);
1987 if (!unix_may_send(sk, other))
1990 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1992 * Check with 1003.1g - what should
1995 unix_state_unlock(other);
1999 unix_state_lock(sk);
2002 if (sk->sk_type == SOCK_SEQPACKET) {
2003 /* We are here only when racing with unix_release_sock()
2004 * as it is clearing @other. Never change the state to TCP_CLOSE,
2005 * unlike what SOCK_DGRAM wants.
2006 */
2007 unix_state_unlock(sk);
2009 } else if (unix_peer(sk) == other) {
2010 unix_peer(sk) = NULL;
2011 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2013 sk->sk_state = TCP_CLOSE;
2014 unix_state_unlock(sk);
2016 unix_dgram_disconnected(sk, other);
2018 err = -ECONNREFUSED;
2020 unix_state_unlock(sk);
2030 if (other->sk_shutdown & RCV_SHUTDOWN)
2033 if (sk->sk_type != SOCK_SEQPACKET) {
2034 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2039 /* other == sk && unix_peer(other) != sk if
2040 * - unix_peer(sk) == NULL, destination address bound to sk
2041 * - unix_peer(sk) == sk by time of get but disconnected before lock
2044 unlikely(unix_peer(other) != sk &&
2045 unix_recvq_full_lockless(other))) {
2047 timeo = unix_wait_for_peer(other, timeo);
2049 err = sock_intr_errno(timeo);
2050 if (signal_pending(current))
2057 unix_state_unlock(other);
2058 unix_state_double_lock(sk, other);
2061 if (unix_peer(sk) != other ||
2062 unix_dgram_peer_wake_me(sk, other)) {
2070 goto restart_locked;
2074 if (unlikely(sk_locked))
2075 unix_state_unlock(sk);
2077 if (sock_flag(other, SOCK_RCVTSTAMP))
2078 __net_timestamp(skb);
2079 maybe_add_creds(skb, sock, other);
2080 scm_stat_add(other, skb);
2081 skb_queue_tail(&other->sk_receive_queue, skb);
2082 unix_state_unlock(other);
2083 other->sk_data_ready(other);
2090 unix_state_unlock(sk);
2091 unix_state_unlock(other);
2101 /* We use paged skbs for stream sockets, and limit occupancy to 32768
2102 * bytes, with a minimum of a full page.
2103 */
2104 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
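/* With 4 KiB pages this is 4096 << get_order(32768) == 4096 << 3 ==
* 32768 bytes; with 64 KiB pages get_order(32768) == 0, so the
* "minimum of a full page" is the single 64 KiB page itself.
*/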
2106 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2107 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2108 struct scm_cookie *scm, bool fds_sent)
2110 struct unix_sock *ousk = unix_sk(other);
2111 struct sk_buff *skb;
2114 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2119 err = unix_scm_to_skb(scm, skb, !fds_sent);
2125 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2132 unix_state_lock(other);
2134 if (sock_flag(other, SOCK_DEAD) ||
2135 (other->sk_shutdown & RCV_SHUTDOWN)) {
2136 unix_state_unlock(other);
2141 maybe_add_creds(skb, sock, other);
2145 consume_skb(ousk->oob_skb);
2147 WRITE_ONCE(ousk->oob_skb, skb);
2149 scm_stat_add(other, skb);
2150 skb_queue_tail(&other->sk_receive_queue, skb);
2151 sk_send_sigurg(other);
2152 unix_state_unlock(other);
2153 other->sk_data_ready(other);
2159 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2162 struct sock *sk = sock->sk;
2163 struct sock *other = NULL;
2165 struct sk_buff *skb;
2167 struct scm_cookie scm;
2168 bool fds_sent = false;
2172 err = scm_send(sock, msg, &scm, false);
2177 if (msg->msg_flags & MSG_OOB) {
2178 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2186 if (msg->msg_namelen) {
2187 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2191 other = unix_peer(sk);
2196 if (sk->sk_shutdown & SEND_SHUTDOWN)
2199 while (sent < len) {
2202 /* Keep two messages in the pipe so it schedules better */
2203 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2205 /* allow fallback to order-0 allocations */
2206 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2208 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2210 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2212 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2213 msg->msg_flags & MSG_DONTWAIT, &err,
2214 get_order(UNIX_SKB_FRAGS_SZ));
2218 /* Only send the fds in the first buffer */
2219 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2226 skb_put(skb, size - data_len);
2227 skb->data_len = data_len;
2229 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2235 unix_state_lock(other);
2237 if (sock_flag(other, SOCK_DEAD) ||
2238 (other->sk_shutdown & RCV_SHUTDOWN))
2241 maybe_add_creds(skb, sock, other);
2242 scm_stat_add(other, skb);
2243 skb_queue_tail(&other->sk_receive_queue, skb);
2244 unix_state_unlock(other);
2245 other->sk_data_ready(other);
2249 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2250 if (msg->msg_flags & MSG_OOB) {
2251 err = queue_oob(sock, msg, other, &scm, fds_sent);
2263 unix_state_unlock(other);
2266 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2267 send_sig(SIGPIPE, current, 0);
2271 return sent ? : err;
2274 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
2275 int offset, size_t size, int flags)
2278 bool send_sigpipe = false;
2279 bool init_scm = true;
2280 struct scm_cookie scm;
2281 struct sock *other, *sk = socket->sk;
2282 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
2284 if (flags & MSG_OOB)
2287 other = unix_peer(sk);
2288 if (!other || sk->sk_state != TCP_ESTABLISHED)
2293 unix_state_unlock(other);
2294 mutex_unlock(&unix_sk(other)->iolock);
2295 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
2301 /* we must acquire iolock as we modify already present
2302 * skbs in the sk_receive_queue and mess with skb->len
2304 err = mutex_lock_interruptible(&unix_sk(other)->iolock);
2306 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
2310 if (sk->sk_shutdown & SEND_SHUTDOWN) {
2312 send_sigpipe = true;
2316 unix_state_lock(other);
2318 if (sock_flag(other, SOCK_DEAD) ||
2319 other->sk_shutdown & RCV_SHUTDOWN) {
2321 send_sigpipe = true;
2322 goto err_state_unlock;
2326 err = maybe_init_creds(&scm, socket, other);
2328 goto err_state_unlock;
2332 skb = skb_peek_tail(&other->sk_receive_queue);
2333 if (tail && tail == skb) {
2335 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
2342 } else if (newskb) {
2343 /* This is the fast path; we don't necessarily need to
2344 * call kfree_skb() here, and even with newskb == NULL
2345 * this does no harm.
2346 */
2347 consume_skb(newskb);
2351 if (skb_append_pagefrags(skb, page, offset, size)) {
2357 skb->data_len += size;
2358 skb->truesize += size;
2359 refcount_add(size, &sk->sk_wmem_alloc);
2362 err = unix_scm_to_skb(&scm, skb, false);
2364 goto err_state_unlock;
2365 spin_lock(&other->sk_receive_queue.lock);
2366 __skb_queue_tail(&other->sk_receive_queue, newskb);
2367 spin_unlock(&other->sk_receive_queue.lock);
2370 unix_state_unlock(other);
2371 mutex_unlock(&unix_sk(other)->iolock);
2373 other->sk_data_ready(other);
2378 unix_state_unlock(other);
2380 mutex_unlock(&unix_sk(other)->iolock);
2383 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
2384 send_sig(SIGPIPE, current, 0);
2390 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2394 struct sock *sk = sock->sk;
2396 err = sock_error(sk);
2400 if (sk->sk_state != TCP_ESTABLISHED)
2403 if (msg->msg_namelen)
2404 msg->msg_namelen = 0;
2406 return unix_dgram_sendmsg(sock, msg, len);
2409 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2410 size_t size, int flags)
2412 struct sock *sk = sock->sk;
2414 if (sk->sk_state != TCP_ESTABLISHED)
2417 return unix_dgram_recvmsg(sock, msg, size, flags);
2420 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2422 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2425 msg->msg_namelen = addr->len;
2426 memcpy(msg->msg_name, addr->name, addr->len);
2430 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2433 struct scm_cookie scm;
2434 struct socket *sock = sk->sk_socket;
2435 struct unix_sock *u = unix_sk(sk);
2436 struct sk_buff *skb, *last;
2445 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2448 mutex_lock(&u->iolock);
2450 skip = sk_peek_offset(sk, flags);
2451 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2452 &skip, &err, &last);
2454 if (!(flags & MSG_PEEK))
2455 scm_stat_del(sk, skb);
2459 mutex_unlock(&u->iolock);
2464 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2465 &err, &timeo, last));
2467 if (!skb) { /* implies iolock unlocked */
2468 unix_state_lock(sk);
2469 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2470 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2471 (sk->sk_shutdown & RCV_SHUTDOWN))
2473 unix_state_unlock(sk);
2477 if (wq_has_sleeper(&u->peer_wait))
2478 wake_up_interruptible_sync_poll(&u->peer_wait,
2479 EPOLLOUT | EPOLLWRNORM |
2483 unix_copy_addr(msg, skb->sk);
2485 if (size > skb->len - skip)
2486 size = skb->len - skip;
2487 else if (size < skb->len - skip)
2488 msg->msg_flags |= MSG_TRUNC;
2490 err = skb_copy_datagram_msg(skb, skip, msg, size);
2494 if (sock_flag(sk, SOCK_RCVTSTAMP))
2495 __sock_recv_timestamp(msg, sk, skb);
2497 memset(&scm, 0, sizeof(scm));
2499 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2500 unix_set_secdata(&scm, skb);
2502 if (!(flags & MSG_PEEK)) {
2504 unix_detach_fds(&scm, skb);
2506 sk_peek_offset_bwd(sk, skb->len);
2508 /* It is questionable: on PEEK we could:
2509 - not return fds - good, but too simple 8)
2510 - return fds, and not return them on read (old strategy,
2511 apparently wrong)
2512 - clone fds (I chose this for now; it is the most universal
2513 solution)
2515 POSIX 1003.1g does not actually define this clearly
2516 at all. POSIX 1003.1g doesn't define a lot of things
2517 at all. 8)
2518 */
2521 sk_peek_offset_fwd(sk, size);
2524 unix_peek_fds(&scm, skb);
2526 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2528 scm_recv(sock, msg, &scm, flags);
2531 skb_free_datagram(sk, skb);
2532 mutex_unlock(&u->iolock);
2537 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2540 struct sock *sk = sock->sk;
2542 #ifdef CONFIG_BPF_SYSCALL
2543 const struct proto *prot = READ_ONCE(sk->sk_prot);
2545 if (prot != &unix_dgram_proto)
2546 return prot->recvmsg(sk, msg, size, flags, NULL);
2548 return __unix_dgram_recvmsg(sk, msg, size, flags);
2551 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2553 struct unix_sock *u = unix_sk(sk);
2554 struct sk_buff *skb;
2557 mutex_lock(&u->iolock);
2558 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2559 mutex_unlock(&u->iolock);
2563 copied = recv_actor(sk, skb);
2570 * Sleep until more data has arrived. But check for races..
2572 static long unix_stream_data_wait(struct sock *sk, long timeo,
2573 struct sk_buff *last, unsigned int last_len,
2576 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2577 struct sk_buff *tail;
2580 unix_state_lock(sk);
2583 prepare_to_wait(sk_sleep(sk), &wait, state);
2585 tail = skb_peek_tail(&sk->sk_receive_queue);
2587 (tail && tail->len != last_len) ||
2589 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2590 signal_pending(current) ||
2594 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2595 unix_state_unlock(sk);
2596 timeo = schedule_timeout(timeo);
2597 unix_state_lock(sk);
2599 if (sock_flag(sk, SOCK_DEAD))
2602 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2605 finish_wait(sk_sleep(sk), &wait);
2606 unix_state_unlock(sk);
2610 static unsigned int unix_skb_len(const struct sk_buff *skb)
2612 return skb->len - UNIXCB(skb).consumed;
struct unix_stream_read_state {
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;
	struct msghdr *msg;
	struct pipe_inode_info *pipe;
	size_t size;
	int flags;
	unsigned int splice_flags;
};
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int chunk = 1;
	struct sk_buff *oob_skb;

	mutex_lock(&u->iolock);
	unix_state_lock(sk);

	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
		unix_state_unlock(sk);
		mutex_unlock(&u->iolock);
		return -EINVAL;
	}

	oob_skb = u->oob_skb;

	if (!(state->flags & MSG_PEEK))
		WRITE_ONCE(u->oob_skb, NULL);

	unix_state_unlock(sk);

	chunk = state->recv_actor(oob_skb, 0, chunk, state);

	if (!(state->flags & MSG_PEEK)) {
		UNIXCB(oob_skb).consumed += 1;
		kfree_skb(oob_skb);
	}

	mutex_unlock(&u->iolock);

	if (chunk < 0)
		return -EFAULT;

	state->msg->msg_flags |= MSG_OOB;
	return 1;
}
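
/* Example (userspace sketch; fd names are placeholders): one byte can be
 * queued out-of-band and fetched ahead of the byte stream:
 *
 *	send(peer_fd, "x", 1, MSG_OOB);		// mark 'x' as the OOB byte
 *	recv(fd, &c, 1, MSG_OOB);		// returns 'x', sets MSG_OOB
 *
 * With SO_OOBINLINE set, the MSG_OOB recv instead fails with -EINVAL (the
 * SOCK_URGINLINE check above) and the byte is read in-line.
 */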
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
				  int flags, int copied)
{
	struct unix_sock *u = unix_sk(sk);

	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);
		skb = NULL;
	} else {
		if (skb == u->oob_skb) {
			if (copied) {
				skb = NULL;
			} else if (sock_flag(sk, SOCK_URGINLINE)) {
				if (!(flags & MSG_PEEK)) {
					WRITE_ONCE(u->oob_skb, NULL);
					consume_skb(skb);
				}
			} else if (!(flags & MSG_PEEK)) {
				skb_unlink(skb, &sk->sk_receive_queue);
				consume_skb(skb);
				skb = skb_peek(&sk->sk_receive_queue);
			}
		}
	}
	return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_skb(sk, recv_actor);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
				    bool freezable)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target, skip;
	int err = 0;
	long timeo;
	size_t size = state->size;
	unsigned int last_len;

	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(flags & MSG_OOB)) {
		err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		err = unix_stream_recv_urg(state);
#endif
		goto out;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	mutex_lock(&u->iolock);

	skip = max(sk_peek_offset(sk, flags), 0);

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;
redo:
		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (skb) {
			skb = manage_oob(skb, sk, flags, copied);
			if (!skb) {
				unix_state_unlock(sk);
				if (copied)
					break;
				goto redo;
			}
		}
#endif
again:
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/* POSIX 1003.1g mandates this order. */
			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			if (!timeo) {
				err = -EAGAIN;
				break;
			}

			mutex_unlock(&u->iolock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len, freezable);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->iolock);
			goto redo;
unlock:
			unix_state_unlock(sk);
			break;
		}

		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);
			unix_copy_addr(state->msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg. */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (UNIXCB(skb).fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}
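
/* Example (userspace sketch, sizes arbitrary): the skip/sk_peek_offset
 * handling above gives SO_PEEK_OFF its documented behaviour: successive
 * peeks walk forward, and a consuming read moves the offset back:
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 4, MSG_PEEK);	// peeks bytes 0..3
 *	recv(fd, buf, 4, MSG_PEEK);	// peeks bytes 4..7
 *	recv(fd, buf, 4, 0);		// consumes 0..3; offset drops to 4
 */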
static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}
int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
			  size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sk->sk_socket,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_stream_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}
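
/* Example (userspace sketch, error handling omitted): splicing queued
 * stream data straight into a pipe. Any non-zero *ppos is rejected with
 * -ESPIPE above:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	splice(unix_fd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */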
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;
		const struct proto *prot = READ_ONCE(other->sk_prot);

		if (prot->unhash)
			prot->unhash(other);
		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
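
/* Example (userspace sketch): the peer_mode mirroring above means that
 * after one end shuts down writing, the other end sees EOF rather than
 * an error:
 *
 *	shutdown(a, SHUT_WR);
 *	n = recv(b, buf, sizeof(buf), 0);	// n == 0 once the queue drains
 */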
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);
	return fd;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			skb = skb_peek(&sk->sk_receive_queue);
			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
				answ = 1;
			err = put_user(answ, (int __user *)arg);
		}
		break;
#endif
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
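
/* Example (userspace sketch): SIOCINQ and SIOCOUTQ surface the values
 * computed by unix_inq_len() and unix_outq_len() above:
 *
 *	int pending;
 *	ioctl(fd, SIOCINQ, &pending);	// unread bytes in the receive queue
 *	ioctl(fd, SIOCOUTQ, &pending);	// sent bytes not yet read by the peer
 */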
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
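
/* Example (userspace sketch): with the mask above, a peer that has half
 * closed shows up as readable-with-hangup rather than as an error:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *	poll(&pfd, 1, -1);
 *	// after the peer's shutdown(SHUT_WR): revents = POLLRDHUP|POLLIN,
 *	// and the next read() returns 0.
 */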
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
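
/* Note the difference from unix_poll(): a connected datagram sender is
 * only writable while the peer's receive queue has room, so (userspace
 * sketch, illustrative):
 *
 *	struct pollfd pfd = { .fd = dgram_fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);	// sleeps until the peer drains its queue
 */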
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
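
/* A worked example of the encoding (assuming a 64-bit build with, say,
 * UNIX_HASH_BITS == 9, giving BUCKET_SPACE == 64 - 10 - 1 == 53):
 *
 *	loff_t pos = set_bucket_offset(3, 7);	// (3UL << 53) | 7
 *	get_bucket(pos);			// == 3
 *	get_offset(pos);			// == 7
 */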
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;
	struct sock *sk;

	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
	     sk; sk = sk_next(sk)) {
		if (++count == offset)
			break;
	}

	return sk;
}
static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);
	struct net *net = seq_file_net(seq);
	struct sock *sk;

	while (bucket < UNIX_HASH_SIZE) {
		spin_lock(&net->unx.table.locks[bucket]);

		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

		spin_unlock(&net->unx.table.locks[bucket]);

		*pos = set_bucket_offset(++bucket, 1);
	}

	return NULL;
}
static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
				  loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);

	for (sk = sk_next(sk); sk; sk = sk_next(sk))
		return sk;

	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);

	*pos = set_bucket_offset(++bucket, 1);

	return unix_get_first(seq, pos);
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	return unix_get_first(seq, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == SEQ_START_TOKEN)
		return unix_get_first(seq, pos);

	return unix_get_next(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	if (sk)
		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under a hash table lock here */
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len -
				offsetof(struct sockaddr_un, sun_path);
			if (u->addr->name->sun_path[0]) {
				len--;
			} else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
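
/* The resulting /proc/net/unix lines look like (values illustrative):
 *
 *	Num               RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 28271 @/tmp/.X11-unix/X0
 *
 * where a leading '@' marks an abstract (non-filesystem) address.
 */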
static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};
struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};
static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}
		expected++;
	}

	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);

	return expected;
}
static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}
static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}
static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}
static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start	= bpf_iter_unix_seq_start,
	.next	= bpf_iter_unix_seq_next,
	.stop	= bpf_iter_unix_seq_stop,
	.show	= bpf_iter_unix_seq_show,
};
#endif
#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16
static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}
static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops		= &bpf_iter_unix_seq_ops,
	.init_seq_private	= bpf_iter_init_unix,
	.fini_seq_private	= bpf_iter_fini_unix,
	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
};
static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}
static struct bpf_iter_reg unix_reg_info = {
	.target			= "unix",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto		= bpf_iter_unix_get_func_proto,
	.seq_info		= &unix_seq_info,
};
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
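
/* Consuming this iterator from userspace (libbpf sketch; the program and
 * buffer names are placeholders, error handling omitted):
 *
 *	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
 *	int iter_fd = bpf_iter_create(bpf_link__fd(link));
 *	while (read(iter_fd, buf, sizeof(buf)) > 0)
 *		;	// each read() runs the iterator program over sockets
 */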
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_dgram_proto);
	proto_unregister(&unix_stream_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);