// SPDX-License-Identifier: GPL-2.0-only
/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

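/*
 * This default is exposed as a writable sysctl, registered in sysctl.c
 * of this directory; on mainline kernels it appears under the
 * net.dccp.default hierarchy. Illustrative usage (not part of this file):
 *
 *      sysctl -w net.dccp.default.tx_qlen=10
 */
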
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
        static const char *const dccp_state_names[] = {
        [DCCP_OPEN]             = "OPEN",
        [DCCP_REQUESTING]       = "REQUESTING",
        [DCCP_PARTOPEN]         = "PARTOPEN",
        [DCCP_LISTEN]           = "LISTEN",
        [DCCP_RESPOND]          = "RESPOND",
        [DCCP_CLOSING]          = "CLOSING",
        [DCCP_ACTIVE_CLOSEREQ]  = "CLOSEREQ",
        [DCCP_PASSIVE_CLOSE]    = "PASSIVE_CLOSE",
        [DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
        [DCCP_TIME_WAIT]        = "TIME_WAIT",
        [DCCP_CLOSED]           = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                /* Client retransmits all Confirm options until entering OPEN */
                if (oldstate == DCCP_PARTOPEN)
                        dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
                    oldstate == DCCP_CLOSING)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
                /* Node (client or server) has received Close packet. */
                dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
                dccp_set_state(sk, DCCP_CLOSED);
                break;
        case DCCP_PASSIVE_CLOSEREQ:
                /*
                 * Client received CloseReq. We set the `active' flag so that
                 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
                 */
                dccp_send_close(sk, 1);
                dccp_set_state(sk, DCCP_CLOSING);
        }
}

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *const dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

void dccp_destruct_common(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_tx_ccid = NULL;
}
EXPORT_SYMBOL_GPL(dccp_destruct_common);

static void dccp_sk_destruct(struct sock *sk)
{
        dccp_destruct_common(sk);
        inet_sock_destruct(sk);
}

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_rto          = DCCP_TIMEOUT_INIT;
        icsk->icsk_syn_retries  = sysctl_dccp_request_retries;
        sk->sk_state            = DCCP_CLOSED;
        sk->sk_write_space      = dccp_write_space;
        sk->sk_destruct         = dccp_sk_destruct;
        icsk->icsk_sync_mss     = dccp_sync_mss;
        dp->dccps_mss_cache     = 536;
        dp->dccps_rate_last     = jiffies;
        dp->dccps_role          = DCCP_ROLE_UNDEFINED;
        dp->dccps_service       = DCCP_SERVICE_CODE_IS_ABSENT;
        dp->dccps_tx_qlen       = sysctl_dccp_tx_qlen;

        dccp_init_xmit_timers(sk);

        INIT_LIST_HEAD(&dp->dccps_featneg);
        /* control socket doesn't need feat nego */
        if (likely(ctl_sock_initialized))
                return dccp_feat_init(sk);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

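/*
 * Illustrative userspace sketch (not part of this file): sockets whose
 * defaults are set up by dccp_init_sock() above are created with the
 * DCCP socket type and protocol:
 *
 *      int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *
 *      if (fd < 0)
 *              perror("socket");
 */
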
void dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_hc_rx_ackvec != NULL) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        dp->dccps_hc_rx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /* do not start to listen if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dp))
                return -EPROTO;
        return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
        return state != DCCP_CLOSED && state != DCCP_LISTEN &&
               state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /*
         * This corresponds to the ABORT function of RFC793, sec. 3.8
         * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
         */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (dccp_need_reset(old_state)) {
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                sk->sk_err = ECONNRESET;
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        dp->dccps_hc_rx_ccid = NULL;

        __skb_queue_purge(&sk->sk_receive_queue);
        __skb_queue_purge(&sk->sk_write_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->inet_dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *      Wait for a DCCP event.
 *
 *      Note that we don't need to lock the socket, as the upper poll layers
 *      take care of normal races (between the test and the event) and we don't
 *      go look at any of the socket buffers directly.
 */

__poll_t dccp_poll(struct file *file, struct socket *sock,
                   poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask;
        u8 shutdown;
        int state;

        sock_poll_wait(file, sock, wait);

        state = inet_sk_state_load(sk);
        if (state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes made
         * by other threads is impossible in any case.
         */

        mask = 0;
        if (READ_ONCE(sk->sk_err))
                mask = EPOLLERR;
        shutdown = READ_ONCE(sk->sk_shutdown);

        if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
                mask |= EPOLLHUP;
        if (shutdown & RCV_SHUTDOWN)
                mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

        /* Connected? */
        if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= EPOLLIN | EPOLLRDNORM;

                if (!(shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
                                mask |= EPOLLOUT | EPOLLWRNORM;
                        } else { /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_is_writeable(sk))
                                        mask |= EPOLLOUT | EPOLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        int rc = -ENOTCONN;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN)
                goto out;

        switch (cmd) {
        case SIOCINQ: {
                struct sk_buff *skb;
                unsigned long amount = 0;

                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
                        /*
                         * We will only return the amount of this packet since
                         * that is all that will be read.
                         */
                        amount = skb->len;
                }
                rc = put_user(amount, (int __user *)arg);
        }
                break;
        default:
                rc = -ENOIOCTLCMD;
                break;
        }
out:
        release_sock(sk);
        return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

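/*
 * Illustrative userspace sketch (not part of this file): a client sets
 * the service code before connect(); the value travels in network byte
 * order, hence the htonl(). The service code 42 here is arbitrary:
 *
 *      __be32 service = htonl(42);
 *
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *                 &service, sizeof(service));
 *
 * A listening socket may pass an array of codes instead, in which case
 * the first element becomes dccps_service and the remainder form the
 * service list handled above.
 */
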
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
        u8 *list, len;
        int i, rc;

        if (cscov < 0 || cscov > 15)
                return -EINVAL;
        /*
         * Populate a list of permissible values, in the range cscov...15. This
         * is necessary since feature negotiation of single values only works if
         * both sides incidentally choose the same value. Since the list starts
         * lowest-value first, negotiation will pick the smallest shared value.
         */
        if (cscov == 0)
                return 0;
        len = 16 - cscov;

        list = kmalloc(len, GFP_KERNEL);
        if (list == NULL)
                return -ENOBUFS;

        for (i = 0; i < len; i++)
                list[i] = cscov++;

        rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

        if (rc == 0) {
                if (rx)
                        dccp_sk(sk)->dccps_pcrlen = cscov;
                else
                        dccp_sk(sk)->dccps_pcslen = cscov;
        }
        kfree(list);
        return rc;
}

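/*
 * Illustrative userspace sketch (not part of this file): requesting
 * partial checksum coverage of 4 words for outgoing packets, per the
 * Minimum Checksum Coverage feature of RFC 4340, 9.2:
 *
 *      int cscov = 4;
 *
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
 *                 &cscov, sizeof(cscov));
 *
 * DCCP_SOCKOPT_RECV_CSCOV analogously bounds the coverage the peer is
 * permitted to use on packets we receive.
 */
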
static int dccp_setsockopt_ccid(struct sock *sk, int type,
                                char __user *optval, unsigned int optlen)
{
        u8 *val;
        int rc = 0;

        if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
                return -EINVAL;

        val = memdup_user(optval, optlen);
        if (IS_ERR(val))
                return PTR_ERR(val);

        lock_sock(sk);
        if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

        if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
                rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
        release_sock(sk);

        kfree(val);
        return rc;
}

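/*
 * Illustrative userspace sketch (not part of this file): registering a
 * CCID preference list (CCID-2 preferred, CCID-3 as fallback) for both
 * halves of the connection. This must be done while the socket is still
 * unconnected, since it feeds feature negotiation:
 *
 *      __u8 ccids[] = { 2, 3 };
 *
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 */
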
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                char __user *optval, unsigned int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CHANGE_L:
        case DCCP_SOCKOPT_CHANGE_R:
                DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_CCID:
        case DCCP_SOCKOPT_RX_CCID:
        case DCCP_SOCKOPT_TX_CCID:
                return dccp_setsockopt_ccid(sk, optname, optval, optlen);
        }

        if (optlen < (int)sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        switch (optname) {
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                if (dp->dccps_role != DCCP_ROLE_SERVER)
                        err = -EOPNOTSUPP;
                else
                        dp->dccps_server_timewait = (val != 0);
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, false);
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                err = dccp_setsockopt_cscov(sk, val, true);
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                if (sk->sk_state != DCCP_CLOSED)
                        err = -EISCONN;
                else if (val < 0 || val >= DCCPQ_POLICY_MAX)
                        err = -EINVAL;
                else
                        dp->dccps_qpolicy = val;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                if (val < 0)
                        err = -EINVAL;
                else
                        dp->dccps_tx_qlen = val;
                break;
        default:
                err = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < (int)sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
                return 0;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case DCCP_SOCKOPT_GET_CUR_MPS:
                val = READ_ONCE(dp->dccps_mss_cache);
                break;
        case DCCP_SOCKOPT_AVAILABLE_CCIDS:
                return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
        case DCCP_SOCKOPT_TX_CCID:
                val = ccid_get_current_tx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_RX_CCID:
                val = ccid_get_current_rx_ccid(dp);
                if (val < 0)
                        return -ENOPROTOOPT;
                break;
        case DCCP_SOCKOPT_SERVER_TIMEWAIT:
                val = dp->dccps_server_timewait;
                break;
        case DCCP_SOCKOPT_SEND_CSCOV:
                val = dp->dccps_pcslen;
                break;
        case DCCP_SOCKOPT_RECV_CSCOV:
                val = dp->dccps_pcrlen;
                break;
        case DCCP_SOCKOPT_QPOLICY_ID:
                val = dp->dccps_qpolicy;
                break;
        case DCCP_SOCKOPT_QPOLICY_TXQLEN:
                val = dp->dccps_tx_qlen;
                break;
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        len = sizeof(val);
        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
        struct cmsghdr *cmsg;

        /*
         * Assign an (opaque) qpolicy priority value to skb->priority.
         *
         * We are overloading this skb field for use with the qpolicy subsystem.
         * The skb->priority is normally used for the SO_PRIORITY option, which
         * is initialised from sk_priority. Since the assignment of sk_priority
         * to skb->priority happens later (on layer 3), we overload this field
         * for use with queueing priorities as long as the skb is on layer 4.
         * The default priority value (if nothing is set) is 0.
         */
        skb->priority = 0;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_DCCP)
                        continue;

                if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
                    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
                        return -EINVAL;

                switch (cmsg->cmsg_type) {
                case DCCP_SCM_PRIORITY:
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
                                return -EINVAL;
                        skb->priority = *(__u32 *)CMSG_DATA(cmsg);
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

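/*
 * Illustrative userspace sketch (not part of this file): passing the
 * per-packet priority parsed above via a DCCP_SCM_PRIORITY control
 * message. The value is opaque here; its interpretation depends on the
 * queuing policy selected with DCCP_SOCKOPT_QPOLICY_ID:
 *
 *      char cbuf[CMSG_SPACE(sizeof(__u32))];
 *      struct msghdr mh = {
 *              .msg_control    = cbuf,
 *              .msg_controllen = sizeof(cbuf),
 *      };
 *      struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *      cm->cmsg_level = SOL_DCCP;
 *      cm->cmsg_type  = DCCP_SCM_PRIORITY;
 *      cm->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *      *(__u32 *)CMSG_DATA(cm) = 1;
 *
 * After filling in msg_iov, sendmsg(fd, &mh, 0) hands the value to
 * dccp_msghdr_parse() above.
 */
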
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        trace_dccp_probe(sk, len);

        if (len > READ_ONCE(dp->dccps_mss_cache))
                return -EMSGSIZE;

        lock_sock(sk);

        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);

        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        if (dccp_qpolicy_full(sk)) {
                rc = -EAGAIN;
                goto out_discard;
        }

        if (sk->sk_state == DCCP_CLOSED) {
                rc = -ENOTCONN;
                goto out_discard;
        }

        /* We need to check dccps_mss_cache after socket is locked. */
        if (len > dp->dccps_mss_cache) {
                rc = -EMSGSIZE;
                goto out_discard;
        }

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_msghdr_parse(msg, skb);
        if (rc != 0)
                goto out_discard;

        dccp_qpolicy_push(sk, skb);
        /*
         * The xmit_timer is set if the TX CCID is rate-based and will expire
         * when congestion control permits to release further packets into the
         * network. Window-based CCIDs do not use this timer.
         */
        if (!timer_pending(&dp->dccps_xmit_timer))
                dccp_write_xmit(sk);
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                switch (dh->dccph_type) {
                case DCCP_PKT_DATA:
                case DCCP_PKT_DATAACK:
                        goto found_ok_skb;

                case DCCP_PKT_CLOSE:
                case DCCP_PKT_CLOSEREQ:
                        if (!(flags & MSG_PEEK))
                                dccp_finish_passive_close(sk);
                        /* fall through */
                case DCCP_PKT_RESET:
                        dccp_pr_debug("found fin (%s) ok!\n",
                                      dccp_packet_name(dh->dccph_type));
                        len = 0;
                        goto found_fin_ok;
                default:
                        dccp_pr_debug("packet_type=%s\n",
                                      dccp_packet_name(dh->dccph_type));
                        sk_eat_skb(sk, skb);
                }
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo, NULL);
                continue;
        found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_msg(skb, 0, msg, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
                if (flags & MSG_TRUNC)
                        len = skb->len;
        found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

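/*
 * Illustrative userspace sketch (not part of this file): DCCP preserves
 * datagram boundaries, so each call returns at most one packet; a
 * too-small buffer truncates the packet and sets MSG_TRUNC, exactly as
 * the found_ok_skb path above implements:
 *
 *      char buf[1500];
 *      ssize_t n = recv(fd, buf, sizeof(buf), 0);
 */
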
int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        sk->sk_max_ack_backlog = backlog;
        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk, backlog);
                if (err)
                        goto out;
        }
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

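/*
 * Illustrative userspace sketch (not part of this file): the classic
 * passive-open sequence, which ends up in inet_dccp_listen() above.
 * The port number 4242 is arbitrary:
 *
 *      struct sockaddr_in sa = {
 *              .sin_family = AF_INET,
 *              .sin_port   = htons(4242),
 *      };
 *
 *      bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *      listen(fd, 5);
 *      int conn = accept(fd, NULL, NULL);
 */
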
static void dccp_terminate_connection(struct sock *sk)
{
        u8 next_state = DCCP_CLOSED;

        switch (sk->sk_state) {
        case DCCP_PASSIVE_CLOSE:
        case DCCP_PASSIVE_CLOSEREQ:
                dccp_finish_passive_close(sk);
                break;
        case DCCP_PARTOPEN:
                dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                /* fall through */
        case DCCP_OPEN:
                dccp_send_close(sk, 1);

                if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
                    !dccp_sk(sk)->dccps_server_timewait)
                        next_state = DCCP_ACTIVE_CLOSEREQ;
                else
                        next_state = DCCP_CLOSING;
                /* fall through */
        default:
                dccp_set_state(sk, next_state);
        }
}

void dccp_close(struct sock *sk, long timeout)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        u32 data_was_unread = 0;
        int state;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        sk_stop_timer(sk, &dp->dccps_xmit_timer);

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                data_was_unread += skb->len;
                __kfree_skb(skb);
        }

        /* If socket has been already reset kill it. */
        if (sk->sk_state == DCCP_CLOSED)
                goto adjudge_to_death;

        if (data_was_unread) {
                /* Unread data was tossed, send an appropriate Reset Code */
                DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
                dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
                dccp_set_state(sk, DCCP_CLOSED);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (sk->sk_state != DCCP_CLOSED) {
                /*
                 * Normal connection termination. May need to wait if there are
                 * still packets in the TX queue that are delayed by the CCID.
                 */
                dccp_flush_write_queue(sk, &timeout);
                dccp_terminate_connection(sk);
        }

        /*
         * Flush write queue. This may be necessary in several cases:
         * - we have been closed by the peer but still have application data;
         * - abortive termination (unread data or zero linger time),
         * - normal termination but queue could not be flushed within time limit
         */
        __skb_queue_purge(&sk->sk_write_queue);

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);

        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        WARN_ON(sock_owned_by_user(sk));

        percpu_counter_inc(sk->sk_prot->orphan_count);

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
                goto out;

        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

out:
        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

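/*
 * Illustrative userspace sketch (not part of this file): enabling
 * SO_LINGER with a zero timeout makes close() take the abortive
 * disconnect path above (unread data has the same effect):
 *
 *      struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *      setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *      close(fd);
 */
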
void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
        dccp_statistics = alloc_percpu(struct dccp_mib);
        if (!dccp_statistics)
                return -ENOMEM;
        return 0;
}

static inline void dccp_mib_exit(void)
{
        free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

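/*
 * Illustrative usage (not part of this file), when DCCP is built as a
 * module; dccp_debug is only present under CONFIG_IP_DCCP_DEBUG:
 *
 *      modprobe dccp thash_entries=1024 dccp_debug=1
 */
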
static int __init dccp_init(void)
{
        unsigned long goal;
        unsigned long nr_pages = totalram_pages();
        int ehash_order, bhash_order, i;
        int rc;

        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
        if (rc)
                goto out_fail;
        inet_hashinfo_init(&dccp_hashinfo);
        rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
        if (rc)
                goto out_free_percpu;
        rc = -ENOBUFS;
        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out_free_hashinfo2;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (nr_pages >= (128 * 1024))
                goal = nr_pages >> (21 - PAGE_SHIFT);
        else
                goal = nr_pages >> (23 - PAGE_SHIFT);

        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
                                        sizeof(struct inet_ehash_bucket);

                while (hash_size & (hash_size - 1))
                        hash_size--;
                dccp_hashinfo.ehash_mask = hash_size - 1;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                DCCP_CRIT("Failed to allocate DCCP established hash table");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                goto out_free_dccp_ehash;

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                        sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                DCCP_CRIT("Failed to allocate DCCP bind hash table");
                goto out_free_dccp_locks;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;

        rc = ccid_initialize_builtins();
        if (rc)
                goto out_sysctl_exit;

        dccp_timestamping_init();

        return 0;

out_sysctl_exit:
        dccp_sysctl_exit();
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
        inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
        inet_hashinfo2_free_mod(&dccp_hashinfo);
out_free_percpu:
        percpu_counter_destroy(&dccp_orphan_count);
out_fail:
        dccp_hashinfo.bhash = NULL;
        dccp_hashinfo.ehash = NULL;
        dccp_hashinfo.bind_bucket_cachep = NULL;
        return rc;
}

static void __exit dccp_fini(void)
{
        ccid_cleanup_builtins();
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order((dccp_hashinfo.ehash_mask + 1) *
                             sizeof(struct inet_ehash_bucket)));
        inet_ehash_locks_free(&dccp_hashinfo);
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
        inet_hashinfo2_free_mod(&dccp_hashinfo);
        percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");