1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * Generic socket support routines. Memory allocators, socket lock/release
8 * handler for protocols to use and generic option handler.
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
88 #include <asm/unaligned.h>
89 #include <linux/capability.h>
90 #include <linux/errno.h>
91 #include <linux/errqueue.h>
92 #include <linux/types.h>
93 #include <linux/socket.h>
95 #include <linux/kernel.h>
96 #include <linux/module.h>
97 #include <linux/proc_fs.h>
98 #include <linux/seq_file.h>
99 #include <linux/sched.h>
100 #include <linux/sched/mm.h>
101 #include <linux/timer.h>
102 #include <linux/string.h>
103 #include <linux/sockios.h>
104 #include <linux/net.h>
105 #include <linux/mm.h>
106 #include <linux/slab.h>
107 #include <linux/interrupt.h>
108 #include <linux/poll.h>
109 #include <linux/tcp.h>
110 #include <linux/init.h>
111 #include <linux/highmem.h>
112 #include <linux/user_namespace.h>
113 #include <linux/static_key.h>
114 #include <linux/memcontrol.h>
115 #include <linux/prefetch.h>
117 #include <linux/uaccess.h>
119 #include <linux/netdevice.h>
120 #include <net/protocol.h>
121 #include <linux/skbuff.h>
122 #include <net/net_namespace.h>
123 #include <net/request_sock.h>
124 #include <net/sock.h>
125 #include <linux/net_tstamp.h>
126 #include <net/xfrm.h>
127 #include <linux/ipsec.h>
128 #include <net/cls_cgroup.h>
129 #include <net/netprio_cgroup.h>
130 #include <linux/sock_diag.h>
132 #include <linux/filter.h>
133 #include <net/sock_reuseport.h>
134 #include <net/bpf_sk_storage.h>
136 #include <trace/events/sock.h>
139 #include <net/busy_poll.h>
141 static DEFINE_MUTEX(proto_list_mutex);
142 static LIST_HEAD(proto_list);
144 static void sock_inuse_add(struct net *net, int val);
147 * sk_ns_capable - General socket capability test
148 * @sk: Socket to use a capability on or through
149 * @user_ns: The user namespace of the capability to use
150 * @cap: The capability to use
152 * Test to see if the opener of the socket had the capability @cap in the
153 * user namespace @user_ns when the socket was created and that the current
154 * process also has it there.
156 bool sk_ns_capable(const struct sock *sk,
157 struct user_namespace *user_ns, int cap)
159 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
160 ns_capable(user_ns, cap);
162 EXPORT_SYMBOL(sk_ns_capable);
165 * sk_capable - Socket global capability test
166 * @sk: Socket to use a capability on or through
167 * @cap: The global capability to use
169 * Test to see if the opener of the socket had the capability @cap when the
170 * socket was created and that the current process has it in all user namespaces.
173 bool sk_capable(const struct sock *sk, int cap)
175 return sk_ns_capable(sk, &init_user_ns, cap);
177 EXPORT_SYMBOL(sk_capable);
180 * sk_net_capable - Network namespace socket capability test
181 * @sk: Socket to use a capability on or through
182 * @cap: The capability to use
184 * Test to see if the opener of the socket had the capability @cap over the
185 * network namespace the socket is a member of when the socket was created,
186 * and that the current process has it as well.
188 bool sk_net_capable(const struct sock *sk, int cap)
190 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
192 EXPORT_SYMBOL(sk_net_capable);
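/*
 * Illustrative sketch (not taken from this file): a protocol handler that
 * wants to gate a privileged operation on the capabilities of whoever opened
 * the socket, checked against the socket's own network namespace, would
 * typically do:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * sk_capable() is the same test against the initial user namespace, and
 * sk_ns_capable() lets the caller pick the user namespace explicitly.
 */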
195 * Each address family might have different locking rules, so we have
196 * one slock key per address family and separate keys for internal and
199 static struct lock_class_key af_family_keys[AF_MAX];
200 static struct lock_class_key af_family_kern_keys[AF_MAX];
201 static struct lock_class_key af_family_slock_keys[AF_MAX];
202 static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
205 * Make lock validator output more readable. (we pre-construct these
206 * strings build-time, so that runtime initialization of socket
210 #define _sock_locks(x) \
211 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
212 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
213 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
214 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
215 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
216 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
217 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
218 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
219 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
220 x "27" , x "28" , x "AF_CAN" , \
221 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
222 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
223 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
224 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
225 x "AF_QIPCRTR", x "AF_SMC" , x "AF_XDP" , \
228 static const char *const af_family_key_strings[AF_MAX+1] = {
229 _sock_locks("sk_lock-")
231 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
232 _sock_locks("slock-")
234 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
235 _sock_locks("clock-")
238 static const char *const af_family_kern_key_strings[AF_MAX+1] = {
239 _sock_locks("k-sk_lock-")
241 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
242 _sock_locks("k-slock-")
244 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
245 _sock_locks("k-clock-")
247 static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
248 _sock_locks("rlock-")
250 static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
251 _sock_locks("wlock-")
253 static const char *const af_family_elock_key_strings[AF_MAX+1] = {
254 _sock_locks("elock-")
258 * sk_callback_lock and sk queues locking rules are per-address-family,
259 * so split the lock classes by using a per-AF key:
261 static struct lock_class_key af_callback_keys[AF_MAX];
262 static struct lock_class_key af_rlock_keys[AF_MAX];
263 static struct lock_class_key af_wlock_keys[AF_MAX];
264 static struct lock_class_key af_elock_keys[AF_MAX];
265 static struct lock_class_key af_kern_callback_keys[AF_MAX];
267 /* Run time adjustable parameters. */
268 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
269 EXPORT_SYMBOL(sysctl_wmem_max);
270 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
271 EXPORT_SYMBOL(sysctl_rmem_max);
272 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
273 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
275 /* Maximal space eaten by iovec or ancillary data plus some space */
276 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
277 EXPORT_SYMBOL(sysctl_optmem_max);
279 int sysctl_tstamp_allow_data __read_mostly = 1;
281 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
282 EXPORT_SYMBOL_GPL(memalloc_socks_key);
285 * sk_set_memalloc - sets %SOCK_MEMALLOC
286 * @sk: socket to set it on
288 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
289 * It's the responsibility of the admin to adjust min_free_kbytes
290 * to meet the requirements
292 void sk_set_memalloc(struct sock *sk)
294 sock_set_flag(sk, SOCK_MEMALLOC);
295 sk->sk_allocation |= __GFP_MEMALLOC;
296 static_branch_inc(&memalloc_socks_key);
298 EXPORT_SYMBOL_GPL(sk_set_memalloc);
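/*
 * Usage sketch, assuming a block-over-network transport (nbd and
 * swap-over-NFS style users): the socket is marked on setup so transmissions
 * needed to free memory can dip into the reserves, and unmarked on teardown:
 *
 *	sk_set_memalloc(sock->sk);
 *	...
 *	sk_clear_memalloc(sock->sk);
 */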
300 void sk_clear_memalloc(struct sock *sk)
302 sock_reset_flag(sk, SOCK_MEMALLOC);
303 sk->sk_allocation &= ~__GFP_MEMALLOC;
304 static_branch_dec(&memalloc_socks_key);
307 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
308 * progress of swapping. SOCK_MEMALLOC may be cleared while
309 * it has rmem allocations due to the last swapfile being deactivated
310 * but there is a risk that the socket is unusable due to exceeding
311 * the rmem limits. Reclaim the reserves and obey rmem limits again.
315 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
317 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
320 unsigned int noreclaim_flag;
322 /* these should have been dropped before queueing */
323 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
325 noreclaim_flag = memalloc_noreclaim_save();
326 ret = sk->sk_backlog_rcv(sk, skb);
327 memalloc_noreclaim_restore(noreclaim_flag);
331 EXPORT_SYMBOL(__sk_backlog_rcv);
333 static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
335 struct __kernel_sock_timeval tv;
338 if (timeo == MAX_SCHEDULE_TIMEOUT) {
342 tv.tv_sec = timeo / HZ;
343 tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
346 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
347 struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
348 *(struct old_timeval32 *)optval = tv32;
353 struct __kernel_old_timeval old_tv;
354 old_tv.tv_sec = tv.tv_sec;
355 old_tv.tv_usec = tv.tv_usec;
356 *(struct __kernel_old_timeval *)optval = old_tv;
357 size = sizeof(old_tv);
359 *(struct __kernel_sock_timeval *)optval = tv;
366 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
368 struct __kernel_sock_timeval tv;
370 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
371 struct old_timeval32 tv32;
373 if (optlen < sizeof(tv32))
376 if (copy_from_user(&tv32, optval, sizeof(tv32)))
378 tv.tv_sec = tv32.tv_sec;
379 tv.tv_usec = tv32.tv_usec;
380 } else if (old_timeval) {
381 struct __kernel_old_timeval old_tv;
383 if (optlen < sizeof(old_tv))
385 if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
387 tv.tv_sec = old_tv.tv_sec;
388 tv.tv_usec = old_tv.tv_usec;
390 if (optlen < sizeof(tv))
392 if (copy_from_user(&tv, optval, sizeof(tv)))
395 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
399 static int warned __read_mostly;
402 if (warned < 10 && net_ratelimit()) {
404 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
405 __func__, current->comm, task_pid_nr(current));
409 *timeo_p = MAX_SCHEDULE_TIMEOUT;
410 if (tv.tv_sec == 0 && tv.tv_usec == 0)
412 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
413 *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
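/*
 * Hedged user-space illustration of how this path is reached (ordinary
 * setsockopt(2) usage, nothing specific to this file):
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * With HZ == 1000 the stored timeout would be 2 * 1000 +
 * DIV_ROUND_UP(500000, USEC_PER_SEC / 1000) = 2500 jiffies.
 */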
417 static void sock_warn_obsolete_bsdism(const char *name)
420 static char warncomm[TASK_COMM_LEN];
421 if (strcmp(warncomm, current->comm) && warned < 5) {
422 strcpy(warncomm, current->comm);
423 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
429 static bool sock_needs_netstamp(const struct sock *sk)
431 switch (sk->sk_family) {
440 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
442 if (sk->sk_flags & flags) {
443 sk->sk_flags &= ~flags;
444 if (sock_needs_netstamp(sk) &&
445 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
446 net_disable_timestamp();
451 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
454 struct sk_buff_head *list = &sk->sk_receive_queue;
456 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
457 atomic_inc(&sk->sk_drops);
458 trace_sock_rcvqueue_full(sk, skb);
462 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
463 atomic_inc(&sk->sk_drops);
468 skb_set_owner_r(skb, sk);
470	/* we escape from the RCU-protected region, make sure we don't leak
475 spin_lock_irqsave(&list->lock, flags);
476 sock_skb_set_dropcount(sk, skb);
477 __skb_queue_tail(list, skb);
478 spin_unlock_irqrestore(&list->lock, flags);
480 if (!sock_flag(sk, SOCK_DEAD))
481 sk->sk_data_ready(sk);
484 EXPORT_SYMBOL(__sock_queue_rcv_skb);
486 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
490 err = sk_filter(sk, skb);
494 return __sock_queue_rcv_skb(sk, skb);
496 EXPORT_SYMBOL(sock_queue_rcv_skb);
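/*
 * Sketch of the typical caller (generic, not quoted from a particular
 * protocol): a per-packet receive hook hands the skb to the owning socket
 * and drops it itself only when queueing fails:
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 */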
498 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
499 const int nested, unsigned int trim_cap, bool refcounted)
501 int rc = NET_RX_SUCCESS;
503 if (sk_filter_trim_cap(sk, skb, trim_cap))
504 goto discard_and_relse;
508 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
509 atomic_inc(&sk->sk_drops);
510 goto discard_and_relse;
513 bh_lock_sock_nested(sk);
516 if (!sock_owned_by_user(sk)) {
518 * trylock + unlock semantics:
520 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
522 rc = sk_backlog_rcv(sk, skb);
524 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
525 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
527 atomic_inc(&sk->sk_drops);
528 goto discard_and_relse;
540 EXPORT_SYMBOL(__sk_receive_skb);
542 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
544 struct dst_entry *dst = __sk_dst_get(sk);
546 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
547 sk_tx_queue_clear(sk);
548 sk->sk_dst_pending_confirm = 0;
549 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
556 EXPORT_SYMBOL(__sk_dst_check);
558 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
560 struct dst_entry *dst = sk_dst_get(sk);
562 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
570 EXPORT_SYMBOL(sk_dst_check);
572 static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
574 int ret = -ENOPROTOOPT;
575 #ifdef CONFIG_NETDEVICES
576 struct net *net = sock_net(sk);
580 if (!ns_capable(net->user_ns, CAP_NET_RAW))
587 sk->sk_bound_dev_if = ifindex;
588 if (sk->sk_prot->rehash)
589 sk->sk_prot->rehash(sk);
600 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
603 int ret = -ENOPROTOOPT;
604 #ifdef CONFIG_NETDEVICES
605 struct net *net = sock_net(sk);
606 char devname[IFNAMSIZ];
613 /* Bind this socket to a particular device like "eth0",
614 * as specified in the passed interface name. If the
615 * name is "" or the option length is zero the socket
618 if (optlen > IFNAMSIZ - 1)
619 optlen = IFNAMSIZ - 1;
620 memset(devname, 0, sizeof(devname));
623 if (copy_from_user(devname, optval, optlen))
627 if (devname[0] != '\0') {
628 struct net_device *dev;
631 dev = dev_get_by_name_rcu(net, devname);
633 index = dev->ifindex;
641 ret = sock_setbindtodevice_locked(sk, index);
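/*
 * User-space view of the option handled above, as a hedged example
 * (standard setsockopt(2); CAP_NET_RAW is required):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 * Passing an empty name (or a zero option length) removes the binding again.
 */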
650 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
651 int __user *optlen, int len)
653 int ret = -ENOPROTOOPT;
654 #ifdef CONFIG_NETDEVICES
655 struct net *net = sock_net(sk);
656 char devname[IFNAMSIZ];
658 if (sk->sk_bound_dev_if == 0) {
667 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
671 len = strlen(devname) + 1;
674 if (copy_to_user(optval, devname, len))
679 if (put_user(len, optlen))
690 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
693 sock_set_flag(sk, bit);
695 sock_reset_flag(sk, bit);
698 bool sk_mc_loop(struct sock *sk)
700 if (dev_recursion_level())
704 switch (sk->sk_family) {
706 return inet_sk(sk)->mc_loop;
707 #if IS_ENABLED(CONFIG_IPV6)
709 return inet6_sk(sk)->mc_loop;
715 EXPORT_SYMBOL(sk_mc_loop);
718 * This is meant for all protocols to use and covers goings on
719 * at the socket level. Everything here is generic.
722 int sock_setsockopt(struct socket *sock, int level, int optname,
723 char __user *optval, unsigned int optlen)
725 struct sock_txtime sk_txtime;
726 struct sock *sk = sock->sk;
733 * Options without arguments
736 if (optname == SO_BINDTODEVICE)
737 return sock_setbindtodevice(sk, optval, optlen);
739 if (optlen < sizeof(int))
742 if (get_user(val, (int __user *)optval))
745 valbool = val ? 1 : 0;
751 if (val && !capable(CAP_NET_ADMIN))
754 sock_valbool_flag(sk, SOCK_DBG, valbool);
757 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
760 sk->sk_reuseport = valbool;
769 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
773 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
776		/* Don't error on this; BSD doesn't, and if you think
777 * about it this is right. Otherwise apps have to
778 * play 'guess the biggest size' games. RCVBUF/SNDBUF
779 * are treated in BSD as hints
781 val = min_t(u32, val, sysctl_wmem_max);
783 /* Ensure val * 2 fits into an int, to prevent max_t()
784 * from treating it as a negative value.
786 val = min_t(int, val, INT_MAX / 2);
787 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
788 WRITE_ONCE(sk->sk_sndbuf,
789 max_t(int, val * 2, SOCK_MIN_SNDBUF));
790 /* Wake up sending tasks if we upped the value. */
791 sk->sk_write_space(sk);
795 if (!capable(CAP_NET_ADMIN)) {
800 /* No negative values (to prevent underflow, as val will be
808		/* Don't error on this; BSD doesn't, and if you think
809 * about it this is right. Otherwise apps have to
810 * play 'guess the biggest size' games. RCVBUF/SNDBUF
811 * are treated in BSD as hints
813 val = min_t(u32, val, sysctl_rmem_max);
815 /* Ensure val * 2 fits into an int, to prevent max_t()
816 * from treating it as a negative value.
818 val = min_t(int, val, INT_MAX / 2);
819 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
821 * We double it on the way in to account for
822 * "struct sk_buff" etc. overhead. Applications
823 * assume that the SO_RCVBUF setting they make will
824 * allow that much actual data to be received on that
827 * Applications are unaware that "struct sk_buff" and
828 * other overheads allocate from the receive buffer
829 * during socket buffer allocation.
831 * And after considering the possible alternatives,
832 * returning the value we actually used in getsockopt
833 * is the most desirable behavior.
835 WRITE_ONCE(sk->sk_rcvbuf,
836 max_t(int, val * 2, SOCK_MIN_RCVBUF));
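		/*
		 * Worked example of the doubling described above
		 * (illustrative only):
		 *
		 *	int val = 65536;
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
		 *
		 * A later getsockopt(SO_RCVBUF) reports 131072 (assuming
		 * net.core.rmem_max permits it), because the doubled value is
		 * what the kernel actually uses.
		 */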
840 if (!capable(CAP_NET_ADMIN)) {
845 /* No negative values (to prevent underflow, as val will be
853 if (sk->sk_prot->keepalive)
854 sk->sk_prot->keepalive(sk, valbool);
855 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
859 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
863 sk->sk_no_check_tx = valbool;
867 if ((val >= 0 && val <= 6) ||
868 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
869 sk->sk_priority = val;
875 if (optlen < sizeof(ling)) {
876 ret = -EINVAL; /* 1003.1g */
879 if (copy_from_user(&ling, optval, sizeof(ling))) {
884 sock_reset_flag(sk, SOCK_LINGER);
886 #if (BITS_PER_LONG == 32)
887 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
888 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
891 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
892 sock_set_flag(sk, SOCK_LINGER);
897 sock_warn_obsolete_bsdism("setsockopt");
902 set_bit(SOCK_PASSCRED, &sock->flags);
904 clear_bit(SOCK_PASSCRED, &sock->flags);
907 case SO_TIMESTAMP_OLD:
908 case SO_TIMESTAMP_NEW:
909 case SO_TIMESTAMPNS_OLD:
910 case SO_TIMESTAMPNS_NEW:
912 if (optname == SO_TIMESTAMP_NEW || optname == SO_TIMESTAMPNS_NEW)
913 sock_set_flag(sk, SOCK_TSTAMP_NEW);
915 sock_reset_flag(sk, SOCK_TSTAMP_NEW);
917 if (optname == SO_TIMESTAMP_OLD || optname == SO_TIMESTAMP_NEW)
918 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
920 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
921 sock_set_flag(sk, SOCK_RCVTSTAMP);
922 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
924 sock_reset_flag(sk, SOCK_RCVTSTAMP);
925 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
929 case SO_TIMESTAMPING_NEW:
930 case SO_TIMESTAMPING_OLD:
931 if (val & ~SOF_TIMESTAMPING_MASK) {
936 if (val & SOF_TIMESTAMPING_OPT_ID &&
937 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
938 if (sk->sk_protocol == IPPROTO_TCP &&
939 sk->sk_type == SOCK_STREAM) {
940 if ((1 << sk->sk_state) &
941 (TCPF_CLOSE | TCPF_LISTEN)) {
945 sk->sk_tskey = tcp_sk(sk)->snd_una;
951 if (val & SOF_TIMESTAMPING_OPT_STATS &&
952 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
957 sk->sk_tsflags = val;
958 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
960 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
961 sock_enable_timestamp(sk,
962 SOCK_TIMESTAMPING_RX_SOFTWARE);
964 sock_disable_timestamp(sk,
965 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
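		/*
		 * Minimal user-space sketch for the SO_TIMESTAMPING handling
		 * above (flags from <linux/net_tstamp.h>; only a subset of the
		 * available flags is shown):
		 *
		 *	unsigned int flags = SOF_TIMESTAMPING_RX_SOFTWARE |
		 *			     SOF_TIMESTAMPING_SOFTWARE;
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		 *		   &flags, sizeof(flags));
		 *
		 * Timestamps are then delivered as SCM_TIMESTAMPING control
		 * messages on recvmsg().
		 */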
971 if (sock->ops->set_rcvlowat)
972 ret = sock->ops->set_rcvlowat(sk, val);
974 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
977 case SO_RCVTIMEO_OLD:
978 case SO_RCVTIMEO_NEW:
979 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
982 case SO_SNDTIMEO_OLD:
983 case SO_SNDTIMEO_NEW:
984 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
987 case SO_ATTACH_FILTER:
989 if (optlen == sizeof(struct sock_fprog)) {
990 struct sock_fprog fprog;
993 if (copy_from_user(&fprog, optval, sizeof(fprog)))
996 ret = sk_attach_filter(&fprog, sk);
1002 if (optlen == sizeof(u32)) {
1006 if (copy_from_user(&ufd, optval, sizeof(ufd)))
1009 ret = sk_attach_bpf(ufd, sk);
1013 case SO_ATTACH_REUSEPORT_CBPF:
1015 if (optlen == sizeof(struct sock_fprog)) {
1016 struct sock_fprog fprog;
1019 if (copy_from_user(&fprog, optval, sizeof(fprog)))
1022 ret = sk_reuseport_attach_filter(&fprog, sk);
1026 case SO_ATTACH_REUSEPORT_EBPF:
1028 if (optlen == sizeof(u32)) {
1032 if (copy_from_user(&ufd, optval, sizeof(ufd)))
1035 ret = sk_reuseport_attach_bpf(ufd, sk);
1039 case SO_DETACH_REUSEPORT_BPF:
1040 ret = reuseport_detach_prog(sk);
1043 case SO_DETACH_FILTER:
1044 ret = sk_detach_filter(sk);
1047 case SO_LOCK_FILTER:
1048 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1051 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1056 set_bit(SOCK_PASSSEC, &sock->flags);
1058 clear_bit(SOCK_PASSSEC, &sock->flags);
1061 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1063 } else if (val != sk->sk_mark) {
1070 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1073 case SO_WIFI_STATUS:
1074 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1078 if (sock->ops->set_peek_off)
1079 ret = sock->ops->set_peek_off(sk, val);
1085 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1088 case SO_SELECT_ERR_QUEUE:
1089 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1092 #ifdef CONFIG_NET_RX_BUSY_POLL
1094 /* allow unprivileged users to decrease the value */
1095 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1101 WRITE_ONCE(sk->sk_ll_usec, val);
1106 case SO_MAX_PACING_RATE:
1108 unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1110 if (sizeof(ulval) != sizeof(val) &&
1111 optlen >= sizeof(ulval) &&
1112 get_user(ulval, (unsigned long __user *)optval)) {
1117 cmpxchg(&sk->sk_pacing_status,
1120 sk->sk_max_pacing_rate = ulval;
1121 sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
1124 case SO_INCOMING_CPU:
1125 WRITE_ONCE(sk->sk_incoming_cpu, val);
1130 dst_negative_advice(sk);
1134 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1135 if (!((sk->sk_type == SOCK_STREAM &&
1136 sk->sk_protocol == IPPROTO_TCP) ||
1137 (sk->sk_type == SOCK_DGRAM &&
1138 sk->sk_protocol == IPPROTO_UDP)))
1140 } else if (sk->sk_family != PF_RDS) {
1144 if (val < 0 || val > 1)
1147 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1152 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1154 } else if (optlen != sizeof(struct sock_txtime)) {
1156 } else if (copy_from_user(&sk_txtime, optval,
1157 sizeof(struct sock_txtime))) {
1159 } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1162 sock_valbool_flag(sk, SOCK_TXTIME, true);
1163 sk->sk_clockid = sk_txtime.clockid;
1164 sk->sk_txtime_deadline_mode =
1165 !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1166 sk->sk_txtime_report_errors =
1167 !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1171 case SO_BINDTOIFINDEX:
1172 ret = sock_setbindtodevice_locked(sk, val);
1182 EXPORT_SYMBOL(sock_setsockopt);
1184 static const struct cred *sk_get_peer_cred(struct sock *sk)
1186 const struct cred *cred;
1188 spin_lock(&sk->sk_peer_lock);
1189 cred = get_cred(sk->sk_peer_cred);
1190 spin_unlock(&sk->sk_peer_lock);
1195 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1196 struct ucred *ucred)
1198 ucred->pid = pid_vnr(pid);
1199 ucred->uid = ucred->gid = -1;
1201 struct user_namespace *current_ns = current_user_ns();
1203 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1204 ucred->gid = from_kgid_munged(current_ns, cred->egid);
1208 static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1210 struct user_namespace *user_ns = current_user_ns();
1213 for (i = 0; i < src->ngroups; i++)
1214 if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1220 int sock_getsockopt(struct socket *sock, int level, int optname,
1221 char __user *optval, int __user *optlen)
1223 struct sock *sk = sock->sk;
1228 unsigned long ulval;
1230 struct old_timeval32 tm32;
1231 struct __kernel_old_timeval tm;
1232 struct __kernel_sock_timeval stm;
1233 struct sock_txtime txtime;
1236 int lv = sizeof(int);
1239 if (get_user(len, optlen))
1244 memset(&v, 0, sizeof(v));
1248 v.val = sock_flag(sk, SOCK_DBG);
1252 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1256 v.val = sock_flag(sk, SOCK_BROADCAST);
1260 v.val = sk->sk_sndbuf;
1264 v.val = sk->sk_rcvbuf;
1268 v.val = sk->sk_reuse;
1272 v.val = sk->sk_reuseport;
1276 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1280 v.val = sk->sk_type;
1284 v.val = sk->sk_protocol;
1288 v.val = sk->sk_family;
1292 v.val = -sock_error(sk);
1294 v.val = xchg(&sk->sk_err_soft, 0);
1298 v.val = sock_flag(sk, SOCK_URGINLINE);
1302 v.val = sk->sk_no_check_tx;
1306 v.val = sk->sk_priority;
1310 lv = sizeof(v.ling);
1311 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1312 v.ling.l_linger = sk->sk_lingertime / HZ;
1316 sock_warn_obsolete_bsdism("getsockopt");
1319 case SO_TIMESTAMP_OLD:
1320 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1321 !sock_flag(sk, SOCK_TSTAMP_NEW) &&
1322 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1325 case SO_TIMESTAMPNS_OLD:
1326 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1329 case SO_TIMESTAMP_NEW:
1330 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1333 case SO_TIMESTAMPNS_NEW:
1334 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1337 case SO_TIMESTAMPING_OLD:
1338 v.val = sk->sk_tsflags;
1341 case SO_RCVTIMEO_OLD:
1342 case SO_RCVTIMEO_NEW:
1343 lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
1346 case SO_SNDTIMEO_OLD:
1347 case SO_SNDTIMEO_NEW:
1348 lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
1352 v.val = sk->sk_rcvlowat;
1360 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1365 struct ucred peercred;
1366 if (len > sizeof(peercred))
1367 len = sizeof(peercred);
1369 spin_lock(&sk->sk_peer_lock);
1370 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1371 spin_unlock(&sk->sk_peer_lock);
1373 if (copy_to_user(optval, &peercred, len))
1380 const struct cred *cred;
1383 cred = sk_get_peer_cred(sk);
1387 n = cred->group_info->ngroups;
1388 if (len < n * sizeof(gid_t)) {
1389 len = n * sizeof(gid_t);
1391 return put_user(len, optlen) ? -EFAULT : -ERANGE;
1393 len = n * sizeof(gid_t);
1395 ret = groups_to_user((gid_t __user *)optval, cred->group_info);
1406 lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1411 if (copy_to_user(optval, address, len))
1416 /* Dubious BSD thing... Probably nobody even uses it, but
1417 * the UNIX standard wants it for whatever reason... -DaveM
1420 v.val = sk->sk_state == TCP_LISTEN;
1424 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1428 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1431 v.val = sk->sk_mark;
1435 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1438 case SO_WIFI_STATUS:
1439 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1443 if (!sock->ops->set_peek_off)
1446 v.val = sk->sk_peek_off;
1449 v.val = sock_flag(sk, SOCK_NOFCS);
1452 case SO_BINDTODEVICE:
1453 return sock_getbindtodevice(sk, optval, optlen, len);
1456 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1462 case SO_LOCK_FILTER:
1463 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1466 case SO_BPF_EXTENSIONS:
1467 v.val = bpf_tell_extensions();
1470 case SO_SELECT_ERR_QUEUE:
1471 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1474 #ifdef CONFIG_NET_RX_BUSY_POLL
1476 v.val = sk->sk_ll_usec;
1480 case SO_MAX_PACING_RATE:
1481 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1482 lv = sizeof(v.ulval);
1483 v.ulval = sk->sk_max_pacing_rate;
1486 v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
1490 case SO_INCOMING_CPU:
1491 v.val = READ_ONCE(sk->sk_incoming_cpu);
1496 u32 meminfo[SK_MEMINFO_VARS];
1498 sk_get_meminfo(sk, meminfo);
1500 len = min_t(unsigned int, len, sizeof(meminfo));
1501 if (copy_to_user(optval, &meminfo, len))
1507 #ifdef CONFIG_NET_RX_BUSY_POLL
1508 case SO_INCOMING_NAPI_ID:
1509 v.val = READ_ONCE(sk->sk_napi_id);
1511 /* aggregate non-NAPI IDs down to 0 */
1512 if (v.val < MIN_NAPI_ID)
1522 v.val64 = sock_gen_cookie(sk);
1526 v.val = sock_flag(sk, SOCK_ZEROCOPY);
1530 lv = sizeof(v.txtime);
1531 v.txtime.clockid = sk->sk_clockid;
1532 v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1533 SOF_TXTIME_DEADLINE_MODE : 0;
1534 v.txtime.flags |= sk->sk_txtime_report_errors ?
1535 SOF_TXTIME_REPORT_ERRORS : 0;
1538 case SO_BINDTOIFINDEX:
1539 v.val = sk->sk_bound_dev_if;
1543 /* We implement the SO_SNDLOWAT etc to not be settable
1546 return -ENOPROTOOPT;
1551 if (copy_to_user(optval, &v, len))
1554 if (put_user(len, optlen))
1560 * Initialize an sk_lock.
1562 * (We also register the sk_lock with the lock validator.)
1564 static inline void sock_lock_init(struct sock *sk)
1566 if (sk->sk_kern_sock)
1567 sock_lock_init_class_and_name(
1569 af_family_kern_slock_key_strings[sk->sk_family],
1570 af_family_kern_slock_keys + sk->sk_family,
1571 af_family_kern_key_strings[sk->sk_family],
1572 af_family_kern_keys + sk->sk_family);
1574 sock_lock_init_class_and_name(
1576 af_family_slock_key_strings[sk->sk_family],
1577 af_family_slock_keys + sk->sk_family,
1578 af_family_key_strings[sk->sk_family],
1579 af_family_keys + sk->sk_family);
1583 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1584 * even temporarily, because of RCU lookups. sk_node should also be left as is.
1585 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1587 static void sock_copy(struct sock *nsk, const struct sock *osk)
1589 #ifdef CONFIG_SECURITY_NETWORK
1590 void *sptr = nsk->sk_security;
1592 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1594 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1595 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1597 #ifdef CONFIG_SECURITY_NETWORK
1598 nsk->sk_security = sptr;
1599 security_sk_clone(osk, nsk);
1603 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1607 struct kmem_cache *slab;
1611 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1614 if (want_init_on_alloc(priority))
1615 sk_prot_clear_nulls(sk, prot->obj_size);
1617 sk = kmalloc(prot->obj_size, priority);
1620 if (security_sk_alloc(sk, family, priority))
1623 if (!try_module_get(prot->owner))
1625 sk_tx_queue_clear(sk);
1631 security_sk_free(sk);
1634 kmem_cache_free(slab, sk);
1640 static void sk_prot_free(struct proto *prot, struct sock *sk)
1642 struct kmem_cache *slab;
1643 struct module *owner;
1645 owner = prot->owner;
1648 cgroup_sk_free(&sk->sk_cgrp_data);
1649 mem_cgroup_sk_free(sk);
1650 security_sk_free(sk);
1652 kmem_cache_free(slab, sk);
1659 * sk_alloc - All socket objects are allocated here
1660 * @net: the applicable net namespace
1661 * @family: protocol family
1662 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1663 * @prot: struct proto associated with this new sock instance
1664 * @kern: is this to be a kernel socket?
1666 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1667 struct proto *prot, int kern)
1671 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1673 sk->sk_family = family;
1675 * See comment in struct sock definition to understand
1676 * why we need sk_prot_creator -acme
1678 sk->sk_prot = sk->sk_prot_creator = prot;
1679 sk->sk_kern_sock = kern;
1681 sk->sk_net_refcnt = kern ? 0 : 1;
1682 if (likely(sk->sk_net_refcnt)) {
1684 sock_inuse_add(net, 1);
1687 sock_net_set(sk, net);
1688 refcount_set(&sk->sk_wmem_alloc, 1);
1690 mem_cgroup_sk_alloc(sk);
1691 cgroup_sk_alloc(&sk->sk_cgrp_data);
1692 sock_update_classid(&sk->sk_cgrp_data);
1693 sock_update_netprioidx(&sk->sk_cgrp_data);
1694 sk_tx_queue_clear(sk);
1699 EXPORT_SYMBOL(sk_alloc);
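/*
 * Typical caller, sketched under the assumption of an ordinary address
 * family ->create() handler (compare inet_create()):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * sock_init_data(), further down in this file, finishes the generic part of
 * the initialisation.
 */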
1701 /* Sockets having SOCK_RCU_FREE will call this function after one RCU
1702 * grace period. This is the case for UDP sockets and TCP listeners.
1704 static void __sk_destruct(struct rcu_head *head)
1706 struct sock *sk = container_of(head, struct sock, sk_rcu);
1707 struct sk_filter *filter;
1709 if (sk->sk_destruct)
1710 sk->sk_destruct(sk);
1712 filter = rcu_dereference_check(sk->sk_filter,
1713 refcount_read(&sk->sk_wmem_alloc) == 0);
1715 sk_filter_uncharge(sk, filter);
1716 RCU_INIT_POINTER(sk->sk_filter, NULL);
1719 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1721 #ifdef CONFIG_BPF_SYSCALL
1722 bpf_sk_storage_free(sk);
1725 if (atomic_read(&sk->sk_omem_alloc))
1726 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1727 __func__, atomic_read(&sk->sk_omem_alloc));
1729 if (sk->sk_frag.page) {
1730 put_page(sk->sk_frag.page);
1731 sk->sk_frag.page = NULL;
1734 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
1735 put_cred(sk->sk_peer_cred);
1736 put_pid(sk->sk_peer_pid);
1738 if (likely(sk->sk_net_refcnt))
1739 put_net(sock_net(sk));
1740 sk_prot_free(sk->sk_prot_creator, sk);
1743 void sk_destruct(struct sock *sk)
1745 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1747 if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1748 reuseport_detach_sock(sk);
1749 use_call_rcu = true;
1753 call_rcu(&sk->sk_rcu, __sk_destruct);
1755 __sk_destruct(&sk->sk_rcu);
1758 static void __sk_free(struct sock *sk)
1760 if (likely(sk->sk_net_refcnt))
1761 sock_inuse_add(sock_net(sk), -1);
1763 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1764 sock_diag_broadcast_destroy(sk);
1769 void sk_free(struct sock *sk)
1772 * We subtract one from sk_wmem_alloc and can know if
1773 * some packets are still in some tx queue.
1774 * If not null, sock_wfree() will call __sk_free(sk) later
1776 if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1779 EXPORT_SYMBOL(sk_free);
1781 static void sk_init_common(struct sock *sk)
1783 skb_queue_head_init(&sk->sk_receive_queue);
1784 skb_queue_head_init(&sk->sk_write_queue);
1785 skb_queue_head_init(&sk->sk_error_queue);
1787 rwlock_init(&sk->sk_callback_lock);
1788 lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
1789 af_rlock_keys + sk->sk_family,
1790 af_family_rlock_key_strings[sk->sk_family]);
1791 lockdep_set_class_and_name(&sk->sk_write_queue.lock,
1792 af_wlock_keys + sk->sk_family,
1793 af_family_wlock_key_strings[sk->sk_family]);
1794 lockdep_set_class_and_name(&sk->sk_error_queue.lock,
1795 af_elock_keys + sk->sk_family,
1796 af_family_elock_key_strings[sk->sk_family]);
1797 lockdep_set_class_and_name(&sk->sk_callback_lock,
1798 af_callback_keys + sk->sk_family,
1799 af_family_clock_key_strings[sk->sk_family]);
1803 * sk_clone_lock - clone a socket, and lock its clone
1804 * @sk: the socket to clone
1805 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1807 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1809 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1812 bool is_charged = true;
1814 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1815 if (newsk != NULL) {
1816 struct sk_filter *filter;
1818 sock_copy(newsk, sk);
1820 newsk->sk_prot_creator = sk->sk_prot;
1823 if (likely(newsk->sk_net_refcnt))
1824 get_net(sock_net(newsk));
1825 sk_node_init(&newsk->sk_node);
1826 sock_lock_init(newsk);
1827 bh_lock_sock(newsk);
1828 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1829 newsk->sk_backlog.len = 0;
1831 atomic_set(&newsk->sk_rmem_alloc, 0);
1833 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1835 refcount_set(&newsk->sk_wmem_alloc, 1);
1836 atomic_set(&newsk->sk_omem_alloc, 0);
1837 sk_init_common(newsk);
1839 newsk->sk_dst_cache = NULL;
1840 newsk->sk_dst_pending_confirm = 0;
1841 newsk->sk_wmem_queued = 0;
1842 newsk->sk_forward_alloc = 0;
1843 atomic_set(&newsk->sk_drops, 0);
1844 newsk->sk_send_head = NULL;
1845 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1846 atomic_set(&newsk->sk_zckey, 0);
1848 sock_reset_flag(newsk, SOCK_DONE);
1850 /* sk->sk_memcg will be populated at accept() time */
1851 newsk->sk_memcg = NULL;
1853 cgroup_sk_clone(&newsk->sk_cgrp_data);
1856 filter = rcu_dereference(sk->sk_filter);
1858 /* though it's an empty new sock, the charging may fail
1859 * if sysctl_optmem_max was changed between creation of
1860 * original socket and cloning
1862 is_charged = sk_filter_charge(newsk, filter);
1863 RCU_INIT_POINTER(newsk->sk_filter, filter);
1866 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1867 /* We need to make sure that we don't uncharge the new
1868 * socket if we couldn't charge it in the first place
1869 * as otherwise we uncharge the parent's filter.
1872 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1873 sk_free_unlock_clone(newsk);
1877 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1879 if (bpf_sk_storage_clone(sk, newsk)) {
1880 sk_free_unlock_clone(newsk);
1886 newsk->sk_err_soft = 0;
1887 newsk->sk_priority = 0;
1888 newsk->sk_incoming_cpu = raw_smp_processor_id();
1889 if (likely(newsk->sk_net_refcnt))
1890 sock_inuse_add(sock_net(newsk), 1);
1893 * Before updating sk_refcnt, we must commit prior changes to memory
1894 * (Documentation/RCU/rculist_nulls.txt for details)
1897 refcount_set(&newsk->sk_refcnt, 2);
1900 * Increment the counter in the same struct proto as the master
1901 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1902 * is the same as sk->sk_prot->socks, as this field was copied
1905 * This _changes_ the previous behaviour, where
1906 * tcp_create_openreq_child always was incrementing the
1907 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1908 * to be taken into account in all callers. -acme
1910 sk_refcnt_debug_inc(newsk);
1911 sk_set_socket(newsk, NULL);
1912 sk_tx_queue_clear(newsk);
1913 RCU_INIT_POINTER(newsk->sk_wq, NULL);
1915 if (newsk->sk_prot->sockets_allocated)
1916 sk_sockets_allocated_inc(newsk);
1918 if (sock_needs_netstamp(sk) &&
1919 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1920 net_enable_timestamp();
1925 EXPORT_SYMBOL_GPL(sk_clone_lock);
1927 void sk_free_unlock_clone(struct sock *sk)
1929	/* It is still a raw copy of the parent, so invalidate
1930	 * the destructor and do a plain sk_free() */
1931 sk->sk_destruct = NULL;
1935 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
1937 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1941 sk_dst_set(sk, dst);
1942 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
1943 if (sk->sk_route_caps & NETIF_F_GSO)
1944 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1945 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1946 if (sk_can_gso(sk)) {
1947 if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
1948 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1950 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1951 sk->sk_gso_max_size = dst->dev->gso_max_size;
1952 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1955 sk->sk_gso_max_segs = max_segs;
1957 EXPORT_SYMBOL_GPL(sk_setup_caps);
1960 * Simple resource managers for sockets.
1965 * Write buffer destructor automatically called from kfree_skb.
1967 void sock_wfree(struct sk_buff *skb)
1969 struct sock *sk = skb->sk;
1970 unsigned int len = skb->truesize;
1972 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1974 * Keep a reference on sk_wmem_alloc, this will be released
1975 * after sk_write_space() call
1977 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
1978 sk->sk_write_space(sk);
1982 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1983 * could not do because of in-flight packets
1985 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
1988 EXPORT_SYMBOL(sock_wfree);
1990 /* This variant of sock_wfree() is used by TCP,
1991 * since it sets SOCK_USE_WRITE_QUEUE.
1993 void __sock_wfree(struct sk_buff *skb)
1995 struct sock *sk = skb->sk;
1997 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2001 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2006 if (unlikely(!sk_fullsock(sk))) {
2007 skb->destructor = sock_edemux;
2012 skb->destructor = sock_wfree;
2013 skb_set_hash_from_sk(skb, sk);
2015	 * We used to take a refcount on sk, but the following operation
2016	 * is enough to guarantee sk_free() won't free this sock until
2017 * all in-flight packets are completed
2019 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2021 EXPORT_SYMBOL(skb_set_owner_w);
2023 static bool can_skb_orphan_partial(const struct sk_buff *skb)
2025 #ifdef CONFIG_TLS_DEVICE
2026 /* Drivers depend on in-order delivery for crypto offload,
2027 * partial orphan breaks out-of-order-OK logic.
2032 return (skb->destructor == sock_wfree ||
2033 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2036 /* This helper is used by netem, as it can hold packets in its
2037 * delay queue. We want to allow the owner socket to send more
2038 * packets, as if they were already TX completed by a typical driver.
2039 * But we also want to keep skb->sk set because some packet schedulers
2040 * rely on it (sch_fq for example).
2042 void skb_orphan_partial(struct sk_buff *skb)
2044 if (skb_is_tcp_pure_ack(skb))
2047 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2052 EXPORT_SYMBOL(skb_orphan_partial);
2055 * Read buffer destructor automatically called from kfree_skb.
2057 void sock_rfree(struct sk_buff *skb)
2059 struct sock *sk = skb->sk;
2060 unsigned int len = skb->truesize;
2062 atomic_sub(len, &sk->sk_rmem_alloc);
2063 sk_mem_uncharge(sk, len);
2065 EXPORT_SYMBOL(sock_rfree);
2068 * Buffer destructor for skbs that are not used directly in read or write
2069 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2071 void sock_efree(struct sk_buff *skb)
2075 EXPORT_SYMBOL(sock_efree);
2077 kuid_t sock_i_uid(struct sock *sk)
2081 read_lock_bh(&sk->sk_callback_lock);
2082 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2083 read_unlock_bh(&sk->sk_callback_lock);
2086 EXPORT_SYMBOL(sock_i_uid);
2088 unsigned long sock_i_ino(struct sock *sk)
2092 read_lock_bh(&sk->sk_callback_lock);
2093 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2094 read_unlock_bh(&sk->sk_callback_lock);
2097 EXPORT_SYMBOL(sock_i_ino);
2100 * Allocate a skb from the socket's send buffer.
2102 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2106 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2107 struct sk_buff *skb = alloc_skb(size, priority);
2110 skb_set_owner_w(skb, sk);
2116 EXPORT_SYMBOL(sock_wmalloc);
2118 static void sock_ofree(struct sk_buff *skb)
2120 struct sock *sk = skb->sk;
2122 atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2125 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2128 struct sk_buff *skb;
2130 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2131 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2135 skb = alloc_skb(size, priority);
2139 atomic_add(skb->truesize, &sk->sk_omem_alloc);
2141 skb->destructor = sock_ofree;
2146 * Allocate a memory block from the socket's option memory buffer.
2148 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2150 if ((unsigned int)size <= sysctl_optmem_max &&
2151 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2153 /* First do the add, to avoid the race if kmalloc
2156 atomic_add(size, &sk->sk_omem_alloc);
2157 mem = kmalloc(size, priority);
2160 atomic_sub(size, &sk->sk_omem_alloc);
2164 EXPORT_SYMBOL(sock_kmalloc);
2166 /* Free an option memory block. Note, we actually want the inline
2167 * here as this allows gcc to detect the nullify and fold away the
2168 * condition entirely.
2170 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2173 if (WARN_ON_ONCE(!mem))
2179 atomic_sub(size, &sk->sk_omem_alloc);
2182 void sock_kfree_s(struct sock *sk, void *mem, int size)
2184 __sock_kfree_s(sk, mem, size, false);
2186 EXPORT_SYMBOL(sock_kfree_s);
2188 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2190 __sock_kfree_s(sk, mem, size, true);
2192 EXPORT_SYMBOL(sock_kzfree_s);
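/*
 * Usage sketch (assumed, in the style of IP option handling): buffers from
 * sock_kmalloc() are charged to sk_omem_alloc and must be freed with the
 * matching size:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOMEM;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 *
 * sock_kzfree_s() is the variant to prefer when the buffer held sensitive
 * data such as key material.
 */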
2194 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2195    I think these locks should be removed for datagram sockets.
2197 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2201 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2205 if (signal_pending(current))
2207 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2208 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2209 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2211 if (sk->sk_shutdown & SEND_SHUTDOWN)
2215 timeo = schedule_timeout(timeo);
2217 finish_wait(sk_sleep(sk), &wait);
2223 * Generic send/receive buffer handlers
2226 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2227 unsigned long data_len, int noblock,
2228 int *errcode, int max_page_order)
2230 struct sk_buff *skb;
2234 timeo = sock_sndtimeo(sk, noblock);
2236 err = sock_error(sk);
2241 if (sk->sk_shutdown & SEND_SHUTDOWN)
2244 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2247 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2248 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2252 if (signal_pending(current))
2254 timeo = sock_wait_for_wmem(sk, timeo);
2256 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2257 errcode, sk->sk_allocation);
2259 skb_set_owner_w(skb, sk);
2263 err = sock_intr_errno(timeo);
2268 EXPORT_SYMBOL(sock_alloc_send_pskb);
2270 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2271 int noblock, int *errcode)
2273 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2275 EXPORT_SYMBOL(sock_alloc_send_skb);
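/*
 * Sketch of the common datagram sendmsg pattern built on the helper above
 * (names are generic, not from a specific protocol):
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *
 * The call waits (bounded by sk_sndtimeo) until sk_wmem_alloc permits the
 * allocation, or fails with -EAGAIN for non-blocking senders.
 */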
2277 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2278 struct sockcm_cookie *sockc)
2282 switch (cmsg->cmsg_type) {
2284 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2286 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2288 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2290 case SO_TIMESTAMPING_OLD:
2291 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2294 tsflags = *(u32 *)CMSG_DATA(cmsg);
2295 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2298 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2299 sockc->tsflags |= tsflags;
2302 if (!sock_flag(sk, SOCK_TXTIME))
2304 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2306 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2308 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2310 case SCM_CREDENTIALS:
2317 EXPORT_SYMBOL(__sock_cmsg_send);
2319 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2320 struct sockcm_cookie *sockc)
2322 struct cmsghdr *cmsg;
2325 for_each_cmsghdr(cmsg, msg) {
2326 if (!CMSG_OK(msg, cmsg))
2328 if (cmsg->cmsg_level != SOL_SOCKET)
2330 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2336 EXPORT_SYMBOL(sock_cmsg_send);
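/*
 * Typical sendmsg-side usage, sketched (the cookie seeding mirrors what the
 * datagram protocols do; treat the exact initialisation as an assumption):
 *
 *	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
 *
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */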
2338 static void sk_enter_memory_pressure(struct sock *sk)
2340 if (!sk->sk_prot->enter_memory_pressure)
2343 sk->sk_prot->enter_memory_pressure(sk);
2346 static void sk_leave_memory_pressure(struct sock *sk)
2348 if (sk->sk_prot->leave_memory_pressure) {
2349 sk->sk_prot->leave_memory_pressure(sk);
2351 unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2353 if (memory_pressure && READ_ONCE(*memory_pressure))
2354 WRITE_ONCE(*memory_pressure, 0);
2358 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2361 * skb_page_frag_refill - check that a page_frag contains enough room
2362 * @sz: minimum size of the fragment we want to get
2363 * @pfrag: pointer to page_frag
2364 * @gfp: priority for memory allocation
2366 * Note: While this allocator tries to use high order pages, there is
2367 * no guarantee that allocations succeed. Therefore, @sz MUST be
2368 * less than or equal to PAGE_SIZE.
2370 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2373 if (page_ref_count(pfrag->page) == 1) {
2377 if (pfrag->offset + sz <= pfrag->size)
2379 put_page(pfrag->page);
2383 if (SKB_FRAG_PAGE_ORDER &&
2384 !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2385 /* Avoid direct reclaim but allow kswapd to wake */
2386 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2387 __GFP_COMP | __GFP_NOWARN |
2389 SKB_FRAG_PAGE_ORDER);
2390 if (likely(pfrag->page)) {
2391 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2395 pfrag->page = alloc_page(gfp);
2396 if (likely(pfrag->page)) {
2397 pfrag->size = PAGE_SIZE;
2402 EXPORT_SYMBOL(skb_page_frag_refill);
2404 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2406 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2409 sk_enter_memory_pressure(sk);
2410 sk_stream_moderate_sndbuf(sk);
2413 EXPORT_SYMBOL(sk_page_frag_refill);
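/*
 * Illustrative use, assuming the pattern stream protocols follow when copying
 * user data into page fragments (compare tcp_sendmsg_locked()):
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 */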
2415 static void __lock_sock(struct sock *sk)
2416 __releases(&sk->sk_lock.slock)
2417 __acquires(&sk->sk_lock.slock)
2422 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2423 TASK_UNINTERRUPTIBLE);
2424 spin_unlock_bh(&sk->sk_lock.slock);
2426 spin_lock_bh(&sk->sk_lock.slock);
2427 if (!sock_owned_by_user(sk))
2430 finish_wait(&sk->sk_lock.wq, &wait);
2433 void __release_sock(struct sock *sk)
2434 __releases(&sk->sk_lock.slock)
2435 __acquires(&sk->sk_lock.slock)
2437 struct sk_buff *skb, *next;
2439 while ((skb = sk->sk_backlog.head) != NULL) {
2440 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2442 spin_unlock_bh(&sk->sk_lock.slock);
2447 WARN_ON_ONCE(skb_dst_is_noref(skb));
2448 skb_mark_not_on_list(skb);
2449 sk_backlog_rcv(sk, skb);
2454 } while (skb != NULL);
2456 spin_lock_bh(&sk->sk_lock.slock);
2460	 * Doing the zeroing here guarantees we cannot loop forever
2461	 * while a wild producer attempts to flood us.
2463 sk->sk_backlog.len = 0;
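/*
 * The backlog drained above is the counterpart of the process-context
 * locking pattern every protocol follows (a generic sketch, not a quote):
 *
 *	lock_sock(sk);
 *	... touch socket state; softirq input queues to sk->sk_backlog ...
 *	release_sock(sk);
 *
 * release_sock() calls __release_sock() to run the queued packets through
 * sk_backlog_rcv() before the lock is dropped.
 */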
2466 void __sk_flush_backlog(struct sock *sk)
2468 spin_lock_bh(&sk->sk_lock.slock);
2470 spin_unlock_bh(&sk->sk_lock.slock);
2474 * sk_wait_data - wait for data to arrive at sk_receive_queue
2475 * @sk: sock to wait on
2476 * @timeo: for how long
2477 * @skb: last skb seen on sk_receive_queue
2479 * Now socket state including sk->sk_err is changed only under lock,
2480 * hence we may omit checks after joining wait queue.
2481 * We check receive queue before schedule() only as optimization;
2482 * it is very likely that release_sock() added new data.
2484 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2486 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2489 add_wait_queue(sk_sleep(sk), &wait);
2490 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2491 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2492 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2493 remove_wait_queue(sk_sleep(sk), &wait);
2496 EXPORT_SYMBOL(sk_wait_data);
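/*
 * Hedged sketch of the intended caller: a blocking recvmsg() implementation
 * holds the socket lock, checks its receive queue and parks in sk_wait_data()
 * while the queue is empty (sk_wait_event() drops and re-takes the lock
 * around schedule()):
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */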
2499 * __sk_mem_raise_allocated - increase memory_allocated
2501 * @size: memory size to allocate
2502 * @amt: pages to allocate
2503 * @kind: allocation type
2505 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2507 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2509 struct proto *prot = sk->sk_prot;
2510 long allocated = sk_memory_allocated_add(sk, amt);
2511 bool charged = true;
2513 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2514 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2515 goto suppress_allocation;
2518 if (allocated <= sk_prot_mem_limits(sk, 0)) {
2519 sk_leave_memory_pressure(sk);
2523 /* Under pressure. */
2524 if (allocated > sk_prot_mem_limits(sk, 1))
2525 sk_enter_memory_pressure(sk);
2527 /* Over hard limit. */
2528 if (allocated > sk_prot_mem_limits(sk, 2))
2529 goto suppress_allocation;
2531 /* guarantee minimum buffer size under pressure */
2532 if (kind == SK_MEM_RECV) {
2533 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2536 } else { /* SK_MEM_SEND */
2537 int wmem0 = sk_get_wmem0(sk, prot);
2539 if (sk->sk_type == SOCK_STREAM) {
2540 if (sk->sk_wmem_queued < wmem0)
2542 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2547 if (sk_has_memory_pressure(sk)) {
2550 if (!sk_under_memory_pressure(sk))
2552 alloc = sk_sockets_allocated_read_positive(sk);
2553 if (sk_prot_mem_limits(sk, 2) > alloc *
2554 sk_mem_pages(sk->sk_wmem_queued +
2555 atomic_read(&sk->sk_rmem_alloc) +
2556 sk->sk_forward_alloc))
2560 suppress_allocation:
2562 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2563 sk_stream_moderate_sndbuf(sk);
2565		/* Fail only if the socket is _under_ its sndbuf.
2566		 * In this case we cannot block, so we have to fail.
2568 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2572 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2573 trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2575 sk_memory_allocated_sub(sk, amt);
2577 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2578 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2582 EXPORT_SYMBOL(__sk_mem_raise_allocated);
2585 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2587 * @size: memory size to allocate
2588 * @kind: allocation type
2590 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2591 * rmem allocation. This function assumes that protocols which have
2592 * memory_pressure use sk_wmem_queued as write buffer accounting.
2594 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2596 int ret, amt = sk_mem_pages(size);
2598 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2599 ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2601 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2604 EXPORT_SYMBOL(__sk_mem_schedule);
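/*
 * Worked example of the accounting above, assuming SK_MEM_QUANTUM equals
 * PAGE_SIZE (4096 on most systems): charging a 1500 byte skb via
 * __sk_mem_schedule(sk, 1500, SK_MEM_RECV) computes sk_mem_pages(1500) == 1,
 * so sk_forward_alloc grows by one quantum (4096 bytes) and memory_allocated
 * by one page; the unused 2596 bytes remain available to later charges until
 * __sk_mem_reclaim() hands whole quanta back.
 */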
/**
 * __sk_mem_reduce_allocated - reclaim memory_allocated
 * @sk: socket
 * @amount: number of quanta
 *
 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
	sk_memory_allocated_sub(sk, amount);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reduce_allocated);
/**
 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 * @sk: socket
 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
int sk_set_peek_off(struct sock *sk, int val)
{
	sk->sk_peek_off = val;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);
/*
 *	Set of default routines for initialising struct proto_ops when
 *	the protocol does not support a particular function. In certain
 *	cases where it makes no sense for a protocol to have a "do nothing"
 *	function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
		   bool kern)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg_locked);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);
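/*
 * Example (illustrative sketch, not part of this file): a protocol family
 * that has no notion of listen()/accept() can point the unsupported
 * entries of its proto_ops at these stubs instead of providing its own
 * "do nothing" handlers.  The my_proto_* names below are hypothetical:
 *
 *	static const struct proto_ops my_proto_ops = {
 *		.family		= PF_PACKET,
 *		.owner		= THIS_MODULE,
 *		.bind		= my_proto_bind,
 *		.sendmsg	= my_proto_sendmsg,
 *		.recvmsg	= my_proto_recvmsg,
 *		.listen		= sock_no_listen,
 *		.accept		= sock_no_accept,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *	};
 */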
/*
 * When a file is received (via SCM_RIGHTS, etc), we must bump the
 * various sock-based usage counts.
 */
void __receive_sock(struct file *file)
{
	struct socket *sock;
	int error;

	/*
	 * The resulting value of "error" is ignored here since we only
	 * need to take action when the file is a socket and testing
	 * "sock" for NULL is sufficient.
	 */
	sock = sock_from_file(file, &error);
	if (sock) {
		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
		sock_update_classid(&sock->sk->sk_cgrp_data);
	}
}
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage_locked);
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}
void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
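/*
 * Example (illustrative sketch, not part of this file): a protocol that
 * arms a per-socket timer uses these wrappers so that the sock reference
 * count follows the pending timer.  The callback name is hypothetical:
 *
 *	timer_setup(&sk->sk_timer, my_proto_timer_expired, 0);
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + 10 * HZ);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);	// drops the timer's hold
 */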
void sock_init_data(struct socket *sock, struct sock *sk)
{
	sk_init_common(sk);
	sk->sk_send_head = NULL;

	timer_setup(&sk->sk_timer, NULL, 0);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
		sock->sk = sk;
		sk->sk_uid = SOCK_INODE(sock)->i_uid;
	} else {
		RCU_INIT_POINTER(sk->sk_wq, NULL);
		sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
	}

	rwlock_init(&sk->sk_callback_lock);
	if (sk->sk_kern_sock)
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_kern_callback_keys + sk->sk_family,
			af_family_kern_clock_key_strings[sk->sk_family]);
	else
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	spin_lock_init(&sk->sk_peer_lock);

	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = SK_DEFAULT_STAMP;
#if BITS_PER_LONG==32
	seqlock_init(&sk->sk_stamp_seq);
#endif
	atomic_set(&sk->sk_zckey, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0UL;
	sk->sk_pacing_rate = ~0UL;
	WRITE_ONCE(sk->sk_pacing_shift, 10);
	sk->sk_incoming_cpu = -1;

	sk_rx_queue_clear(sk);
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	refcount_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);
void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
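/*
 * Example (illustrative sketch, not part of this file): process-context
 * paths bracket their work with lock_sock()/release_sock(); packets that
 * were queued on the backlog while the lock was owned are processed by
 * __release_sock() before the lock is handed back:
 *
 *	lock_sock(sk);
 *	err = my_proto_do_setsockopt(sk, optname, val);	// hypothetical
 *	release_sock(sk);
 */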
/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Return false if the fast path is taken:
 *
 *   sk_lock.slock locked, owned = 0, BH disabled
 *
 * Return true if the slow path is taken:
 *
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
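/*
 * Example (illustrative sketch, not part of this file): callers pair
 * lock_sock_fast() with unlock_sock_fast(), handing back the slow-path
 * indication so the matching unlock variant is used:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	my_short_update(sk);		// hypothetical, must not sleep
 *	unlock_sock_fast(sk, slow);
 */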
int sock_gettstamp(struct socket *sock, void __user *userstamp,
		   bool timeval, bool time32)
{
	struct sock *sk = sock->sk;
	struct timespec64 ts;

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec64(sock_read_timestamp(sk));
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		ktime_t kt = ktime_get_real();
		sock_write_timestamp(sk, kt);
		ts = ktime_to_timespec64(kt);
	}

	if (timeval)
		ts.tv_nsec /= 1000;

#ifdef CONFIG_COMPAT_32BIT_TIME
	if (time32)
		return put_old_timespec32(&ts, userstamp);
#endif
#ifdef CONFIG_SPARC64
	/* beware of padding in sparc64 timeval */
	if (timeval && !in_compat_syscall()) {
		struct __kernel_old_timeval __user tv = {
			.tv_sec = ts.tv_sec,
			.tv_usec = ts.tv_nsec,
		};
		if (copy_to_user(userstamp, &tv, sizeof(tv)))
			return -EFAULT;
		return 0;
	}
#endif
	return put_timespec64(&ts, userstamp);
}
EXPORT_SYMBOL(sock_gettstamp);
void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (sock_needs_netstamp(sk) &&
		    !(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int copied, err;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);
/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);
#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);
/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);
#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did the hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
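/*
 * Example (illustrative sketch, not part of this file): protocols bump the
 * per-cpu counter from their hash/unhash paths so that /proc/net/protocols
 * and sock_diag can report how many sockets of each type exist per netns:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	// on hash
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	// on unhash
 */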
static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}
EXPORT_SYMBOL_GPL(sock_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;

	net->core.sock_inuse = alloc_percpu(int);
	if (net->core.sock_inuse == NULL)
		goto out;

	return 0;

out:
	free_percpu(net->core.prot_inuse);
	return -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
	free_percpu(net->core.sock_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static int assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return -ENOSPC;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
	return 0;
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline int assign_proto_idx(struct proto *prot)
{
	return 0;
}

static inline void release_proto_idx(struct proto *prot)
{
}

static void sock_inuse_add(struct net *net, int val)
{
}
#endif
static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
{
	if (!twsk_prot)
		return;
	kfree(twsk_prot->twsk_slab_name);
	twsk_prot->twsk_slab_name = NULL;
	kmem_cache_destroy(twsk_prot->twsk_slab);
	twsk_prot->twsk_slab = NULL;
}

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}
static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}
int proto_register(struct proto *prot, int alloc_slab)
{
	int ret = -ENOBUFS;

	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_ACCOUNT |
						  prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab;
		}
	}

	mutex_lock(&proto_list_mutex);
	ret = assign_proto_idx(prot);
	if (ret) {
		mutex_unlock(&proto_list_mutex);
		goto out_free_timewait_sock_slab;
	}
	list_add(&prot->node, &proto_list);
	mutex_unlock(&proto_list_mutex);
	return ret;

out_free_timewait_sock_slab:
	if (alloc_slab && prot->twsk_prot)
		tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
	if (alloc_slab) {
		req_prot_cleanup(prot->rsk_prot);

		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}
out:
	return ret;
}
EXPORT_SYMBOL(proto_register);
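/*
 * Example (illustrative sketch, not part of this file): a protocol module
 * describes its per-socket structure in a struct proto and registers it,
 * passing a nonzero second argument to get a dedicated slab cache.  The
 * names below are hypothetical:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_proto_sock),
 *	};
 *
 *	static int __init my_proto_init(void)
 *	{
 *		return proto_register(&my_proto, 1);
 *	}
 *
 *	static void __exit my_proto_exit(void)
 *	{
 *		proto_unregister(&my_proto);
 *	}
 */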
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);
	tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);
int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static const char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */
#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */