// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

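/* Clamp the RTO so a pending TCP_USER_TIMEOUT expires on schedule: once the
 * time already spent retransmitting is subtracted, the next timer fires no
 * later than the remaining user-timeout budget (or ASAP if it is exhausted).
 */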
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

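/* Same clamping for the zero-window probe timer: never schedule the next
 * probe beyond what is left of the user timeout, but keep at least
 * TCP_TIMEOUT_MIN between probes.
 */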
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

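/* Suspect a PMTUD black hole after repeated timeouts: halve the current MSS
 * search point (bounded by the tcp_base_mss, tcp_mtu_probe_floor and
 * tcp_min_snd_mss sysctls) so that smaller segments get a chance to pass.
 */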
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

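/* Model the total time (in msecs) consumed by @boundary retransmissions:
 * exponential backoff from @rto_base until an attempt would exceed
 * TCP_RTO_MAX, then linear growth of TCP_RTO_MAX per attempt.
 */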
static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}

/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0 the default timeout is calculated and used.
 *             Using TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handling the sack compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets casted to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

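/* Zero-window probe (persist) timer: keep probing while the peer advertises
 * a zero window, and abort the connection once the probe count or the user
 * timeout limit is exceeded.
 */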
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceeds the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		     msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

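/* Track RTO statistics: count every RTO event, and stamp the start of a new
 * RTO recovery episode when this is the first unanswered retransmission.
 */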
static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  req->timeout << req->num_timeout, TCP_RTO_MAX);
}

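/* Check whether a retransmission that degenerated into a zero-window probe
 * has gone unanswered for too long (roughly 2 * TCP_RTO_MAX with nothing
 * received from the peer), in which case the connection is declared dead.
 */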
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const int timeout = TCP_RTO_MAX * 2;
	s32 rcv_delta;

	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&inet->inet_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&sk->sk_v6_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion.
		 * Let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   icsk->icsk_backoff >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		return;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}

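/* Timer callback wrapper: run tcp_write_timer_handler() directly when the
 * socket is not locked by user context, otherwise defer the work to
 * tcp_release_cb() via the tsq flags.
 */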
static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

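/* Default syn_ack_timeout callback for request sockets: only bumps the
 * TCPTIMEOUTS MIB counter; the SYNACK retransmission itself is driven by
 * the request socket timer.
 */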
void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

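/* Arm or disarm the keepalive timer as the SO_KEEPALIVE socket option is
 * toggled; CLOSE and LISTEN sockets are left alone.
 */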
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

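/* Shared sk_timer handler: sends keepalive probes, and also implements the
 * FIN_WAIT2 timeout for orphaned sockets (moving them to timewait or
 * resetting them).
 */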
static void tcp_keepalive_timer (struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

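/* hrtimer callback that flushes a pending compressed ACK: send the coalesced
 * ACK now, or defer to tcp_release_cb() if the socket is owned by user
 * context.
 */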
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

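/* Wire up all TCP timers for a new socket: the retransmit, delayed-ACK and
 * keepalive timers, plus the pacing and compressed-ACK hrtimers.
 */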
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}