// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/busy_poll.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
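
/*
 * Illustrative sketch (not part of the build): after() and before() compare
 * 32-bit sequence numbers modulo 2^32 by looking at the sign of the
 * difference, so the in-window test above keeps working when the window
 * spans the 0xffffffff -> 0 wrap.
 */
#if 0
static inline bool example_seq_after(u32 a, u32 b)
{
	return (s32)(b - a) < 0;	/* e.g. true for a = 2, b = 0xfffffff0 */
}
#endif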
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
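
/*
 * Illustrative sketch (not part of the build): the rate limit above boils
 * down to "at most one out-of-window ACK per configured interval". The
 * helper and its one-field state are local to this example; the real logic
 * lives in tcp_oow_rate_limited().
 */
#if 0
static bool example_oow_limited(unsigned long *last_oow_ack_time,
				unsigned long min_interval)
{
	unsigned long now = jiffies;

	if (*last_oow_ack_time &&
	    time_before(now, *last_oow_ack_time + min_interval))
		return true;		/* suppress this ACK */

	*last_oow_ack_time = now;	/* remember when we last ACKed */
	return false;
}
#endif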
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}
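
/*
 * Illustrative sketch (not part of the build): TCP-AO authenticates a 64-bit
 * extended sequence number. The upper 32 bits (the SNE) must tick every time
 * the 32-bit sequence space wraps, which is exactly what the
 * seq < tw_rcv_nxt test above detects.
 */
#if 0
static inline u64 example_extended_seq(u32 sne, u32 seq)
{
	return ((u64)sne << 32) | seq;
}
#endif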
/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it has a newer
	   sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable and, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}
	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}

	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
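
/*
 * Illustrative sketch (not part of the build), with the rationale as an
 * assumption spelled out: the new ISN above is tw_snd_nxt + 65535 + 2,
 * i.e. beyond anything the old incarnation could still have in flight
 * (its largest possible unscaled send window, 65535, plus slack for
 * SYN/FIN sequence space), satisfying rule (1) of the RFC 1122 quote
 * earlier. Zero is skipped because a zero tcp_tw_isn means "no TIME-WAIT
 * ISN".
 */
#if 0
static inline u32 example_tw_isn(u32 tw_snd_nxt)
{
	u32 isn = tw_snd_nxt + 65535 + 2;

	return isn ? : 1;	/* never hand back 0 */
}
#endif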
static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (!key)
		return;

	tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
	if (!tcptw->tw_md5_key)
		return;

	if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
		goto out_free;

	tcp_md5_add_sigpool();
	return;

out_free:
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
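
/*
 * Illustrative sketch (not part of the build): the fin-wait-2 timeout floor
 * above is 3.5 * RTO, computed with shifts: (rto << 2) - (rto >> 1)
 * == 4 * rto - rto / 2. E.g. with RTO = 200ms the floor is 700ms, while a
 * socket entering plain TCP_TIME_WAIT always gets TCP_TIMEWAIT_LEN (60s).
 */
#if 0
static inline int example_fin_wait2_floor(int rto)
{
	return (rto << 2) - (rto >> 1);		/* 3.5 * rto */
}
#endif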
#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
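
/*
 * Illustrative sketch (not part of the build): the call_rcu() pattern above
 * defers freeing the MD5 key until every reader that might still
 * dereference tw_md5_key under rcu_read_lock() has left its critical
 * section. The struct and names below are local to this example.
 */
#if 0
struct example_obj {
	struct rcu_head rcu;
	int payload;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

/* writer side: call_rcu(&obj->rcu, example_free_rcu); */
#endif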
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);
/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rsk_rcv_wnd,
				  &req->rsk_window_clamp,
				  ireq->wscale_ok,
				  &rcv_wscale,
				  rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
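
/*
 * Illustrative sketch (not part of the build): what the clamping above
 * amounts to. With SO_RCVBUF locked by the user, full_space caps the
 * window clamp; without a user lock, tp->window_clamp or the route
 * metric (RTAX_WINDOW) wins. Names below are local to this example.
 */
#if 0
static u32 example_clamp(u32 window_clamp, u32 route_window,
			 u32 full_space, bool rcvbuf_locked)
{
	u32 clamp = window_clamp ? : route_window;

	if (rcvbuf_locked && (clamp > full_space || clamp == 0))
		clamp = full_space;
	return clamp;
}
#endif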
static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *ao_key;
#endif

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;
	ao_key = treq->af_specific->ao_lookup(sk, req,
					      tcp_rsk(req)->ao_keyid, -1);
	if (ao_key)
		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
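
/*
 * Illustrative sketch (not part of the build): without the window-scale
 * option the advertised window is the raw 16-bit header field (at most
 * 65535); with it, the field is shifted left by the peer's scale, e.g.
 * 0x8000 << 7 = 4 MiB. That is why window_clamp is capped at 65535 in
 * the no-wscale branch above.
 */
#if 0
static inline u32 example_snd_wnd(u16 hdr_window, u8 snd_wscale)
{
	return (u32)hdr_window << snd_wscale;
}
#endif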
/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however, it does not work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept ACK
	   from SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before we attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);
listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
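
/*
 * Illustrative sketch (not part of the build): under TCP_DEFER_ACCEPT,
 * the bare-ACK test above is just "does the segment carry nothing beyond
 * the ISN?". end_seq == rcv_isn + 1 means the ACK covers only the SYN,
 * so child creation is postponed until data arrives (or the defer window
 * is exhausted).
 */
#if 0
static inline bool example_is_bare_third_ack(u32 end_seq, u32 rcv_isn)
{
	return end_seq == rcv_isn + 1;	/* SYN consumed one sequence number */
}
#endif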
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
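
/*
 * Illustrative sketch (not part of the build): the shape of the
 * owned-by-user test above. If a process holds the socket lock, the
 * segment is parked on the backlog and replayed by release_sock();
 * only otherwise is it processed right away in softirq context.
 * process_now() is a hypothetical stand-in for the state machine call.
 */
#if 0
	bh_lock_sock(child);
	if (!sock_owned_by_user(child))
		process_now(child, skb);	/* e.g. tcp_rcv_state_process() */
	else
		__sk_add_backlog(child, skb);	/* drained at release_sock() */
	bh_unlock_sock(child);
#endif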