1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2017 - 2019, Intel Corporation.
7 #define pr_fmt(fmt) "MPTCP: " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/algapi.h>
13 #include <crypto/sha2.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
19 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
20 #include <net/ip6_route.h>
21 #include <net/transp_v6.h>
23 #include <net/mptcp.h>
24 #include <uapi/linux/mptcp.h>
28 #include <trace/events/mptcp.h>
29 #include <trace/events/sock.h>
31 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
33 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
34 enum linux_mptcp_mib_field field)
36 MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
39 static void subflow_req_destructor(struct request_sock *req)
41 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
43 pr_debug("subflow_req=%p", subflow_req);
46 sock_put((struct sock *)subflow_req->msk);
48 mptcp_token_destroy_request(req);
51 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
56 put_unaligned_be32(nonce1, &msg[0]);
57 put_unaligned_be32(nonce2, &msg[4]);
59 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
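/* Helper note: mptcp_crypto_hmac_sha() keys the HMAC from key1/key2, and the
 * 8-byte message built above is simply nonce1 followed by nonce2; callers then
 * truncate the resulting SHA-256 digest as required by the option they emit.
 */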
62 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
64 return mptcp_is_fully_established((void *)msk) &&
65 ((mptcp_pm_is_userspace(msk) &&
66 mptcp_userspace_pm_active(msk)) ||
67 READ_ONCE(msk->pm.accept_subflow));
70 /* validate received token and create truncated hmac and nonce for SYN-ACK */
71 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
73 struct mptcp_sock *msk = subflow_req->msk;
74 u8 hmac[SHA256_DIGEST_SIZE];
76 get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
78 subflow_generate_hmac(msk->local_key, msk->remote_key,
79 subflow_req->local_nonce,
80 subflow_req->remote_nonce, hmac);
82 subflow_req->thmac = get_unaligned_be64(hmac);
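/* Sketch of the result: thmac carries the leftmost 64 bits of the digest
 * (get_unaligned_be64() on the first 8 bytes), i.e. the truncated HMAC
 * advertised in the MP_JOIN SYN-ACK, cf. RFC 8684 sect. 3.2.
 */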
85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
87 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
88 struct mptcp_sock *msk;
91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
93 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
99 sock_put((struct sock *)msk);
102 subflow_req->local_id = local_id;
107 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
109 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
111 subflow_req->mp_capable = 0;
112 subflow_req->mp_join = 0;
113 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
114 subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
115 subflow_req->msk = NULL;
116 mptcp_token_init_request(req);
119 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
121 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
124 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
126 struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
129 memset(mpext, 0, sizeof(*mpext));
130 mpext->reset_reason = reason;
134 /* Init mptcp request socket.
136 * Returns an error code if a JOIN has failed and a TCP reset
139 static int subflow_check_req(struct request_sock *req,
140 const struct sock *sk_listener,
143 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
144 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
145 struct mptcp_options_received mp_opt;
146 bool opt_mp_capable, opt_mp_join;
148 pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
150 #ifdef CONFIG_TCP_MD5SIG
151 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
154 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
158 mptcp_get_options(skb, &mp_opt);
160 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
161 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
162 if (opt_mp_capable) {
163 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
167 } else if (opt_mp_join) {
168 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
171 if (opt_mp_capable && listener->request_mptcp) {
172 int err, retries = MPTCP_TOKEN_MAX_RETRIES;
174 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
177 get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
178 } while (subflow_req->local_key == 0);
180 if (unlikely(req->syncookie)) {
181 mptcp_crypto_key_sha(subflow_req->local_key,
184 if (mptcp_token_exists(subflow_req->token)) {
187 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
189 subflow_req->mp_capable = 1;
194 err = mptcp_token_new_request(req);
196 subflow_req->mp_capable = 1;
197 else if (retries-- > 0)
200 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
202 } else if (opt_mp_join && listener->request_mptcp) {
203 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
204 subflow_req->mp_join = 1;
205 subflow_req->backup = mp_opt.backup;
206 subflow_req->remote_id = mp_opt.join_id;
207 subflow_req->token = mp_opt.token;
208 subflow_req->remote_nonce = mp_opt.nonce;
209 subflow_req->msk = subflow_token_join_request(req);
211 /* Can't fall back to TCP in this case. */
212 if (!subflow_req->msk) {
213 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
217 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
218 pr_debug("syn inet_sport=%d %d",
219 ntohs(inet_sk(sk_listener)->inet_sport),
220 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
221 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
222 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
225 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
228 subflow_req_create_thmac(subflow_req);
230 if (unlikely(req->syncookie)) {
231 if (mptcp_can_accept_new_subflow(subflow_req->msk))
232 subflow_init_req_cookie_join_save(subflow_req, skb);
237 pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
238 subflow_req->remote_nonce, subflow_req->msk);
244 int mptcp_subflow_init_cookie_req(struct request_sock *req,
245 const struct sock *sk_listener,
248 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
249 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
250 struct mptcp_options_received mp_opt;
251 bool opt_mp_capable, opt_mp_join;
254 subflow_init_req(req, sk_listener);
255 mptcp_get_options(skb, &mp_opt);
257 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
258 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
259 if (opt_mp_capable && opt_mp_join)
262 if (opt_mp_capable && listener->request_mptcp) {
263 if (mp_opt.sndr_key == 0)
266 subflow_req->local_key = mp_opt.rcvr_key;
267 err = mptcp_token_new_request(req);
271 subflow_req->mp_capable = 1;
272 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
273 } else if (opt_mp_join && listener->request_mptcp) {
274 if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
277 subflow_req->mp_join = 1;
278 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
283 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
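/* Note on the syncookie path: no request sock state survived the SYN, so for
 * MPC the local key is rebuilt from the rcvr_key echoed in the final ACK,
 * while for MP_JOIN the handshake state is recovered via
 * mptcp_token_join_cookie_init_state().
 */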
285 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
288 struct request_sock *req)
290 struct dst_entry *dst;
293 tcp_rsk(req)->is_mptcp = 1;
294 subflow_init_req(req, sk);
296 dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
300 err = subflow_check_req(req, sk, skb);
306 tcp_request_sock_ops.send_reset(sk, skb);
310 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
311 struct tcp_fastopen_cookie *foc,
312 enum tcp_synack_type synack_type)
314 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
315 struct inet_request_sock *ireq = inet_rsk(req);
317 /* clear tstamp_ok, as needed depending on cookie */
318 if (foc && foc->len > -1)
321 if (synack_type == TCP_SYNACK_FASTOPEN)
322 mptcp_fastopen_subflow_synack_set_params(subflow, req);
325 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
327 struct request_sock *req,
328 struct tcp_fastopen_cookie *foc,
329 enum tcp_synack_type synack_type,
330 struct sk_buff *syn_skb)
332 subflow_prep_synack(sk, req, foc, synack_type);
334 return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
335 synack_type, syn_skb);
338 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
339 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
341 struct request_sock *req,
342 struct tcp_fastopen_cookie *foc,
343 enum tcp_synack_type synack_type,
344 struct sk_buff *syn_skb)
346 subflow_prep_synack(sk, req, foc, synack_type);
348 return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
349 synack_type, syn_skb);
352 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
355 struct request_sock *req)
357 struct dst_entry *dst;
360 tcp_rsk(req)->is_mptcp = 1;
361 subflow_init_req(req, sk);
363 dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
367 err = subflow_check_req(req, sk, skb);
373 tcp6_request_sock_ops.send_reset(sk, skb);
378 /* validate received truncated hmac and create hmac for third ACK */
379 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
381 u8 hmac[SHA256_DIGEST_SIZE];
384 subflow_generate_hmac(subflow->remote_key, subflow->local_key,
385 subflow->remote_nonce, subflow->local_nonce,
388 thmac = get_unaligned_be64(hmac);
389 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
390 subflow, subflow->token, thmac, subflow->thmac);
392 return thmac == subflow->thmac;
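/* The client-side check mirrors subflow_req_create_thmac() with the key and
 * nonce order swapped: here the peer is the responder, so its truncated HMAC
 * is computed as HMAC(remote_key/local_key, remote_nonce || local_nonce) and
 * compared against the thmac received in the MP_JOIN SYN-ACK.
 */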
395 void mptcp_subflow_reset(struct sock *ssk)
397 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
398 struct sock *sk = subflow->conn;
400 /* mptcp_mp_fail_no_response() can reach here on an already closed
403 if (ssk->sk_state == TCP_CLOSE)
406 /* must hold: tcp_done() could drop last reference on parent */
409 tcp_send_active_reset(ssk, GFP_ATOMIC);
411 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
412 mptcp_schedule_work(sk);
417 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
419 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
422 void __mptcp_sync_state(struct sock *sk, int state)
424 struct mptcp_subflow_context *subflow;
425 struct mptcp_sock *msk = mptcp_sk(sk);
426 struct sock *ssk = msk->first;
428 subflow = mptcp_subflow_ctx(ssk);
429 __mptcp_propagate_sndbuf(sk, ssk);
430 if (!msk->rcvspace_init)
431 mptcp_rcv_space_init(msk, ssk);
433 if (sk->sk_state == TCP_SYN_SENT) {
434 /* subflow->idsn is always available in TCP_SYN_SENT state,
435 * even for the FASTOPEN scenarios
437 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
438 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
439 mptcp_set_state(sk, state);
440 sk->sk_state_change(sk);
444 static void subflow_set_remote_key(struct mptcp_sock *msk,
445 struct mptcp_subflow_context *subflow,
446 const struct mptcp_options_received *mp_opt)
448 /* active MPC subflow will reach here multiple times:
449 * at subflow_finish_connect() time and at 4th ack time
451 if (subflow->remote_key_valid)
454 subflow->remote_key_valid = 1;
455 subflow->remote_key = mp_opt->sndr_key;
456 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
459 WRITE_ONCE(msk->remote_key, subflow->remote_key);
460 WRITE_ONCE(msk->ack_seq, subflow->iasn);
461 WRITE_ONCE(msk->can_ack, true);
462 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
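/* The initial ack sequence (iasn) is derived from the SHA-256 hash of the
 * peer key via mptcp_crypto_key_sha(); it seeds both the MPTCP-level ack_seq
 * and the rcv_wnd_sent counter above.
 */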
465 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
466 struct mptcp_subflow_context *subflow,
467 const struct mptcp_options_received *mp_opt)
469 struct mptcp_sock *msk = mptcp_sk(sk);
473 /* Options are available only in the non-fallback cases;
474 * avoid updating rx path fields otherwise
476 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
477 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
478 subflow_set_remote_key(msk, subflow, mp_opt);
481 if (!sock_owned_by_user(sk)) {
482 __mptcp_sync_state(sk, ssk->sk_state);
484 msk->pending_state = ssk->sk_state;
485 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
487 mptcp_data_unlock(sk);
490 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
492 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
493 struct mptcp_options_received mp_opt;
494 struct sock *parent = subflow->conn;
495 struct mptcp_sock *msk;
497 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
499 /* be sure no special action on any packet other than syn-ack */
500 if (subflow->conn_finished)
503 msk = mptcp_sk(parent);
504 subflow->rel_write_seq = 1;
505 subflow->conn_finished = 1;
506 subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
507 pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
509 mptcp_get_options(skb, &mp_opt);
510 if (subflow->request_mptcp) {
511 if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
512 MPTCP_INC_STATS(sock_net(sk),
513 MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
514 mptcp_do_fallback(sk);
519 if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
520 WRITE_ONCE(msk->csum_enabled, true);
521 if (mp_opt.deny_join_id0)
522 WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
523 subflow->mp_capable = 1;
524 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
525 mptcp_finish_connect(sk);
526 mptcp_propagate_state(parent, sk, subflow, &mp_opt);
527 } else if (subflow->request_join) {
528 u8 hmac[SHA256_DIGEST_SIZE];
530 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
531 subflow->reset_reason = MPTCP_RST_EMPTCP;
535 subflow->backup = mp_opt.backup;
536 subflow->thmac = mp_opt.thmac;
537 subflow->remote_nonce = mp_opt.nonce;
538 WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
539 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
540 subflow, subflow->thmac, subflow->remote_nonce,
543 if (!subflow_thmac_valid(subflow)) {
544 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
545 subflow->reset_reason = MPTCP_RST_EMPTCP;
549 if (!mptcp_finish_join(sk))
552 subflow_generate_hmac(subflow->local_key, subflow->remote_key,
553 subflow->local_nonce,
554 subflow->remote_nonce,
556 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
558 subflow->mp_join = 1;
559 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
561 if (subflow_use_different_dport(msk, sk)) {
562 pr_debug("synack inet_dport=%d %d",
563 ntohs(inet_sk(sk)->inet_dport),
564 ntohs(inet_sk(parent)->inet_dport));
565 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
567 } else if (mptcp_check_fallback(sk)) {
569 mptcp_propagate_state(parent, sk, subflow, NULL);
574 subflow->reset_transient = 0;
575 mptcp_subflow_reset(sk);
578 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
580 WARN_ON_ONCE(local_id < 0 || local_id > 255);
581 WRITE_ONCE(subflow->local_id, local_id);
584 static int subflow_chk_local_id(struct sock *sk)
586 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
587 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
590 if (likely(subflow->local_id >= 0))
593 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
597 subflow_set_local_id(subflow, err);
601 static int subflow_rebuild_header(struct sock *sk)
603 int err = subflow_chk_local_id(sk);
605 if (unlikely(err < 0))
608 return inet_sk_rebuild_header(sk);
611 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
612 static int subflow_v6_rebuild_header(struct sock *sk)
614 int err = subflow_chk_local_id(sk);
616 if (unlikely(err < 0))
619 return inet6_sk_rebuild_header(sk);
623 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
624 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
626 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
628 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
630 pr_debug("subflow=%p", subflow);
632 /* Never answer to SYNs sent to broadcast or multicast */
633 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
636 return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
637 &subflow_request_sock_ipv4_ops,
644 static void subflow_v4_req_destructor(struct request_sock *req)
646 subflow_req_destructor(req);
647 tcp_request_sock_ops.destructor(req);
650 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
651 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
652 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
653 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
654 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
655 static struct proto tcpv6_prot_override __ro_after_init;
657 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
659 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
661 pr_debug("subflow=%p", subflow);
663 if (skb->protocol == htons(ETH_P_IP))
664 return subflow_v4_conn_request(sk, skb);
666 if (!ipv6_unicast_destination(skb))
669 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
670 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
674 return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
675 &subflow_request_sock_ipv6_ops, sk, skb);
679 return 0; /* don't send reset */
682 static void subflow_v6_req_destructor(struct request_sock *req)
684 subflow_req_destructor(req);
685 tcp6_request_sock_ops.destructor(req);
689 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
690 struct sock *sk_listener,
691 bool attach_listener)
693 if (ops->family == AF_INET)
694 ops = &mptcp_subflow_v4_request_sock_ops;
695 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
696 else if (ops->family == AF_INET6)
697 ops = &mptcp_subflow_v6_request_sock_ops;
700 return inet_reqsk_alloc(ops, sk_listener, attach_listener);
702 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
704 /* validate hmac received in third ACK */
705 static bool subflow_hmac_valid(const struct request_sock *req,
706 const struct mptcp_options_received *mp_opt)
708 const struct mptcp_subflow_request_sock *subflow_req;
709 u8 hmac[SHA256_DIGEST_SIZE];
710 struct mptcp_sock *msk;
712 subflow_req = mptcp_subflow_rsk(req);
713 msk = subflow_req->msk;
717 subflow_generate_hmac(msk->remote_key, msk->local_key,
718 subflow_req->remote_nonce,
719 subflow_req->local_nonce, hmac);
721 return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
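/* Unlike the 64-bit thmac in the SYN-ACK, the third ACK carries a longer HMAC
 * (MPTCPOPT_HMAC_LEN bytes); the comparison uses crypto_memneq() so it stays
 * constant-time.
 */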
724 static void subflow_ulp_fallback(struct sock *sk,
725 struct mptcp_subflow_context *old_ctx)
727 struct inet_connection_sock *icsk = inet_csk(sk);
729 mptcp_subflow_tcp_fallback(sk, old_ctx);
730 icsk->icsk_ulp_ops = NULL;
731 rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
732 tcp_sk(sk)->is_mptcp = 0;
734 mptcp_subflow_ops_undo_override(sk);
737 void mptcp_subflow_drop_ctx(struct sock *ssk)
739 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
744 list_del(&mptcp_subflow_ctx(ssk)->node);
745 if (inet_csk(ssk)->icsk_ulp_ops) {
746 subflow_ulp_fallback(ssk, ctx);
754 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
755 struct mptcp_subflow_context *subflow,
756 const struct mptcp_options_received *mp_opt)
758 subflow_set_remote_key(msk, subflow, mp_opt);
759 subflow->fully_established = 1;
760 WRITE_ONCE(msk->fully_established, true);
762 if (subflow->is_mptfo)
763 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
766 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
768 struct request_sock *req,
769 struct dst_entry *dst,
770 struct request_sock *req_unhash,
773 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
774 struct mptcp_subflow_request_sock *subflow_req;
775 struct mptcp_options_received mp_opt;
776 bool fallback, fallback_is_fatal;
777 struct mptcp_sock *owner;
780 pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
782 /* After child creation we must look for MPC even when options
785 mp_opt.suboptions = 0;
787 /* hopefully temporary handling for MP_JOIN+syncookie */
788 subflow_req = mptcp_subflow_rsk(req);
789 fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
790 fallback = !tcp_rsk(req)->is_mptcp;
794 /* if the sk is MP_CAPABLE, we try to fetch the client key */
795 if (subflow_req->mp_capable) {
796 /* we can receive and accept an in-window, out-of-order pkt,
797 * which may not carry the MP_CAPABLE opt even on mptcp enabled
798 * paths: always try to extract the peer key, and fallback
799 * for packets missing it.
800 * Even OoO DSS packets coming legitimately after dropped or
801 * reordered MPC will cause fallback, but we don't have other
804 mptcp_get_options(skb, &mp_opt);
805 if (!(mp_opt.suboptions &
806 (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
809 } else if (subflow_req->mp_join) {
810 mptcp_get_options(skb, &mp_opt);
811 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
812 !subflow_hmac_valid(req, &mp_opt) ||
813 !mptcp_can_accept_new_subflow(subflow_req->msk)) {
814 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
820 child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
821 req_unhash, own_req);
823 if (child && *own_req) {
824 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
826 tcp_rsk(req)->drop_req = false;
828 /* we need to fall back on ctx allocation failure and on the pre-req
829 * checks above. In the latter scenario we additionally need
830 * to reset the context to non-MPTCP status.
832 if (!ctx || fallback) {
833 if (fallback_is_fatal) {
834 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
840 /* ssk inherits options of listener sk */
841 ctx->setsockopt_seq = listener->setsockopt_seq;
843 if (ctx->mp_capable) {
844 ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
849 owner = mptcp_sk(ctx->conn);
850 mptcp_pm_new_connection(owner, child, 1);
852 /* with OoO packets we can reach here without ingress
855 if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
856 mptcp_pm_fully_established(owner, child);
857 ctx->pm_notified = 1;
859 } else if (ctx->mp_join) {
860 owner = subflow_req->msk;
862 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
866 /* move the msk reference ownership to the subflow */
867 subflow_req->msk = NULL;
868 ctx->conn = (struct sock *)owner;
870 if (subflow_use_different_sport(owner, sk)) {
871 pr_debug("ack inet_sport=%d %d",
872 ntohs(inet_sk(sk)->inet_sport),
873 ntohs(inet_sk((struct sock *)owner)->inet_sport));
874 if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
875 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
878 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
881 if (!mptcp_finish_join(child))
884 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
885 tcp_rsk(req)->drop_req = true;
889 /* check for expected invariant - should never trigger, just help
890 * catching earlier subtle bugs
892 WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
893 (!mptcp_subflow_ctx(child) ||
894 !mptcp_subflow_ctx(child)->conn));
898 mptcp_subflow_drop_ctx(child);
899 tcp_rsk(req)->drop_req = true;
900 inet_csk_prepare_for_destroy_sock(child);
902 req->rsk_ops->send_reset(sk, skb);
904 /* The last child reference will be released by the caller */
909 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
910 mptcp_subflow_drop_ctx(child);
914 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
915 static struct proto tcp_prot_override __ro_after_init;
917 enum mapping_status {
926 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
928 pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
929 ssn, subflow->map_subflow_seq, subflow->map_data_len);
932 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
934 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
935 unsigned int skb_consumed;
937 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
938 if (WARN_ON_ONCE(skb_consumed >= skb->len))
941 return skb->len - skb_consumed <= subflow->map_data_len -
942 mptcp_subflow_get_map_offset(subflow);
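/* Rough example for validate_mapping() below: with map_subflow_seq = 1000 and
 * map_data_len = 500, the current read position ssn (copied_seq - ssn_offset)
 * must fall in [1000, 1500); anything outside that window means the mapping
 * does not describe the data we are about to read.
 */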
945 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
947 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
948 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
950 if (unlikely(before(ssn, subflow->map_subflow_seq))) {
951 /* Mapping covers data later in the subflow stream,
952 * currently unsupported.
954 dbg_bad_map(subflow, ssn);
957 if (unlikely(!before(ssn, subflow->map_subflow_seq +
958 subflow->map_data_len))) {
959 /* Mapping covers only past subflow data, invalid */
960 dbg_bad_map(subflow, ssn);
966 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
969 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
970 u32 offset, seq, delta;
977 /* mapping already validated on previous traversal */
978 if (subflow->map_csum_len == subflow->map_data_len)
981 /* traverse the receive queue, ensuring it contains a full
982 * DSS mapping and accumulating the related csum.
983 * Preserve the accumulated csum across multiple calls, to compute
986 delta = subflow->map_data_len - subflow->map_csum_len;
988 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
989 offset = seq - TCP_SKB_CB(skb)->seq;
991 /* if the current skb has not been accounted yet, csum its contents
992 * up to the amount covered by the current DSS
994 if (offset < skb->len) {
997 len = min(skb->len - offset, delta);
998 csum = skb_checksum(skb, offset, len, 0);
999 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
1000 subflow->map_csum_len);
1003 subflow->map_csum_len += len;
1008 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
1009 /* if this subflow is closed, the partial mapping
1010 * will never be completed; flush the pending skbs, so
1011 * that subflow_sched_work_if_closed() can kick in
1013 if (unlikely(ssk->sk_state == TCP_CLOSE))
1014 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1015 sk_eat_skb(ssk, skb);
1017 /* not enough data to validate the csum */
1018 return MAPPING_EMPTY;
1021 /* the DSS mapping for the next skbs will be validated later,
1022 * when a get_mapping_status call processes such skbs
1027 /* note that 'map_data_len' accounts only for the carried data and does
1028 * not include the eventual seq increment due to the DATA_FIN,
1029 * while the pseudo header requires the original DSS data len,
1032 csum = __mptcp_make_csum(subflow->map_seq,
1033 subflow->map_subflow_seq,
1034 subflow->map_data_len + subflow->map_data_fin,
1035 subflow->map_data_csum);
1036 if (unlikely(csum)) {
1037 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
1038 return MAPPING_BAD_CSUM;
1041 subflow->valid_csum_seen = 1;
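/* The DSS checksum thus covers the payload accumulated above plus a
 * pseudo-header made of data seq, subflow seq and the data-level length
 * (including the DATA_FIN byte when present), cf. RFC 8684 sect. 3.3.1.
 */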
1045 static enum mapping_status get_mapping_status(struct sock *ssk,
1046 struct mptcp_sock *msk)
1048 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1049 bool csum_reqd = READ_ONCE(msk->csum_enabled);
1050 struct mptcp_ext *mpext;
1051 struct sk_buff *skb;
1055 skb = skb_peek(&ssk->sk_receive_queue);
1057 return MAPPING_EMPTY;
1059 if (mptcp_check_fallback(ssk))
1060 return MAPPING_DUMMY;
1062 mpext = mptcp_get_ext(skb);
1063 if (!mpext || !mpext->use_map) {
1064 if (!subflow->map_valid && !skb->len) {
1065 /* the TCP stack delivers 0-len FIN pkts to the receive
1066 * queue; those are the only 0-len pkts ever expected here,
1067 * and we can admit a missing mapping only for 0-len pkts
1069 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1070 WARN_ONCE(1, "0len seq %d:%d flags %x",
1071 TCP_SKB_CB(skb)->seq,
1072 TCP_SKB_CB(skb)->end_seq,
1073 TCP_SKB_CB(skb)->tcp_flags);
1074 sk_eat_skb(ssk, skb);
1075 return MAPPING_EMPTY;
1078 if (!subflow->map_valid)
1079 return MAPPING_INVALID;
1084 trace_get_mapping_status(mpext);
1086 data_len = mpext->data_len;
1087 if (data_len == 0) {
1088 pr_debug("infinite mapping received");
1089 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1090 subflow->map_data_len = 0;
1091 return MAPPING_INVALID;
1094 if (mpext->data_fin == 1) {
1095 if (data_len == 1) {
1096 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1098 pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1099 if (subflow->map_valid) {
1100 /* A DATA_FIN might arrive in a DSS
1101 * option before the previous mapping
1102 * has been fully consumed. Continue
1103 * handling the existing mapping.
1105 skb_ext_del(skb, SKB_EXT_MPTCP);
1109 mptcp_schedule_work((struct sock *)msk);
1111 return MAPPING_DATA_FIN;
1114 u64 data_fin_seq = mpext->data_seq + data_len - 1;
1116 /* If mpext->data_seq is a 32-bit value, data_fin_seq
1117 * must also be limited to 32 bits.
1120 data_fin_seq &= GENMASK_ULL(31, 0);
1122 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1123 pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1124 data_fin_seq, mpext->dsn64);
1127 /* Adjust for DATA_FIN using 1 byte of sequence space */
1131 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1132 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1134 if (subflow->map_valid) {
1135 /* Allow replacing only with an identical map */
1136 if (subflow->map_seq == map_seq &&
1137 subflow->map_subflow_seq == mpext->subflow_seq &&
1138 subflow->map_data_len == data_len &&
1139 subflow->map_csum_reqd == mpext->csum_reqd) {
1140 skb_ext_del(skb, SKB_EXT_MPTCP);
1144 /* If this skb's data is fully covered by the current mapping,
1145 * the new map would need caching, which is not supported
1147 if (skb_is_fully_mapped(ssk, skb)) {
1148 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1149 return MAPPING_INVALID;
1152 /* will validate the next map after consuming the current one */
1156 subflow->map_seq = map_seq;
1157 subflow->map_subflow_seq = mpext->subflow_seq;
1158 subflow->map_data_len = data_len;
1159 subflow->map_valid = 1;
1160 subflow->map_data_fin = mpext->data_fin;
1161 subflow->mpc_map = mpext->mpc_map;
1162 subflow->map_csum_reqd = mpext->csum_reqd;
1163 subflow->map_csum_len = 0;
1164 subflow->map_data_csum = csum_unfold(mpext->csum);
1166 /* Cf. RFC 8684 Section 3.3 */
1167 if (unlikely(subflow->map_csum_reqd != csum_reqd))
1168 return MAPPING_INVALID;
1170 pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1171 subflow->map_seq, subflow->map_subflow_seq,
1172 subflow->map_data_len, subflow->map_csum_reqd,
1173 subflow->map_data_csum);
1176 /* we revalidate valid mapping on new skb, because we must ensure
1177 * the current skb is completely covered by the available mapping
1179 if (!validate_mapping(ssk, skb)) {
1180 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1181 return MAPPING_INVALID;
1184 skb_ext_del(skb, SKB_EXT_MPTCP);
1187 return validate_data_csum(ssk, skb, csum_reqd);
1190 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1193 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1194 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1197 incr = limit >= skb->len ? skb->len + fin : limit;
1199 pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
1200 subflow->map_subflow_seq);
1201 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1202 tcp_sk(ssk)->copied_seq += incr;
1203 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1204 sk_eat_skb(ssk, skb);
1205 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1206 subflow->map_valid = 0;
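/* Example: if the whole skb lies below the msk-level ack (limit >= skb->len),
 * it is consumed entirely, including the sequence slot taken by a FIN flag;
 * otherwise copied_seq only advances past the duplicate portion.
 */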
1209 /* sched mptcp worker to remove the subflow if no more data is pending */
1210 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1212 if (likely(ssk->sk_state != TCP_CLOSE))
1215 if (skb_queue_empty(&ssk->sk_receive_queue) &&
1216 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1217 mptcp_schedule_work((struct sock *)msk);
1220 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
1222 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1224 if (subflow->mp_join)
1226 else if (READ_ONCE(msk->csum_enabled))
1227 return !subflow->valid_csum_seen;
1229 return !subflow->fully_established;
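/* Fallback policy recap: MP_JOIN subflows can never fall back to plain TCP;
 * with checksums enabled we fall back only if no checksum has validated yet
 * (likely a middlebox mangling data from the start); otherwise fallback is
 * allowed only before the connection is fully established.
 */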
1232 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1234 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1235 unsigned long fail_tout;
1237 /* graceful failure can happen only on the MPC subflow */
1238 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1241 /* since the close timeout takes precedence over the fail one,
1242 * no need to start the latter when the first is already set
1244 if (sock_flag((struct sock *)msk, SOCK_DEAD))
1247 /* we don't need extreme accuracy here, use a zero fail_tout as special
1248 * value meaning no fail timeout at all;
1250 fail_tout = jiffies + TCP_RTO_MAX;
1253 WRITE_ONCE(subflow->fail_tout, fail_tout);
1256 mptcp_reset_tout_timer(msk, subflow->fail_tout);
1259 static bool subflow_check_data_avail(struct sock *ssk)
1261 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1262 enum mapping_status status;
1263 struct mptcp_sock *msk;
1264 struct sk_buff *skb;
1266 if (!skb_peek(&ssk->sk_receive_queue))
1267 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1268 if (subflow->data_avail)
1271 msk = mptcp_sk(subflow->conn);
1276 status = get_mapping_status(ssk, msk);
1277 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1278 if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
1279 status == MAPPING_BAD_CSUM))
1282 if (status != MAPPING_OK)
1285 skb = skb_peek(&ssk->sk_receive_queue);
1286 if (WARN_ON_ONCE(!skb))
1289 if (unlikely(!READ_ONCE(msk->can_ack)))
1292 old_ack = READ_ONCE(msk->ack_seq);
1293 ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1294 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1296 if (unlikely(before64(ack_seq, old_ack))) {
1297 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1301 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1307 subflow_sched_work_if_closed(msk, ssk);
1311 if (!__mptcp_check_fallback(msk)) {
1312 /* RFC 8684 section 3.7. */
1313 if (status == MAPPING_BAD_CSUM &&
1314 (subflow->mp_join || subflow->valid_csum_seen)) {
1315 subflow->send_mp_fail = 1;
1317 if (!READ_ONCE(msk->allow_infinite_fallback)) {
1318 subflow->reset_transient = 0;
1319 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1322 mptcp_subflow_fail(msk, ssk);
1323 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
1327 if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
1328 /* fatal protocol error, close the socket.
1329 * subflow_error_report() will introduce the appropriate barriers
1331 subflow->reset_transient = 0;
1332 subflow->reset_reason = MPTCP_RST_EMPTCP;
1335 WRITE_ONCE(ssk->sk_err, EBADMSG);
1336 tcp_set_state(ssk, TCP_CLOSE);
1337 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1338 sk_eat_skb(ssk, skb);
1339 tcp_send_active_reset(ssk, GFP_ATOMIC);
1340 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1344 mptcp_do_fallback(ssk);
1347 skb = skb_peek(&ssk->sk_receive_queue);
1348 subflow->map_valid = 1;
1349 subflow->map_seq = READ_ONCE(msk->ack_seq);
1350 subflow->map_data_len = skb->len;
1351 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1352 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
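/* Fallback recovery sketch: the skb at the head of the receive queue is given
 * a synthetic mapping starting at the current msk-level ack_seq and covering
 * the whole skb, so in-sequence data keeps flowing without DSS options.
 */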
1356 bool mptcp_subflow_data_available(struct sock *sk)
1358 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1360 /* check if current mapping is still valid */
1361 if (subflow->map_valid &&
1362 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1363 subflow->map_valid = 0;
1364 WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
1366 pr_debug("Done with mapping: seq=%u data_len=%u",
1367 subflow->map_subflow_seq,
1368 subflow->map_data_len);
1371 return subflow_check_data_avail(sk);
1374 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1377 * In mptcp, rwin is about the mptcp-level connection data.
1379 * Data that is still on the ssk rx queue can thus be ignored,
1380 * as far as mptcp peer is concerned that data is still inflight.
1381 * DSS ACK is updated when skb is moved to the mptcp rx queue.
1383 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1385 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1386 const struct sock *sk = subflow->conn;
1388 *space = __mptcp_space(sk);
1389 *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1392 static void subflow_error_report(struct sock *ssk)
1394 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1396 /* bail early if this is a no-op, so that we avoid introducing a
1397 * problematic lockdep dependency between TCP accept queue lock
1398 * and msk socket spinlock
1403 mptcp_data_lock(sk);
1404 if (!sock_owned_by_user(sk))
1405 __mptcp_error_report(sk);
1407 __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
1408 mptcp_data_unlock(sk);
1411 static void subflow_data_ready(struct sock *sk)
1413 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1414 u16 state = 1 << inet_sk_state_load(sk);
1415 struct sock *parent = subflow->conn;
1416 struct mptcp_sock *msk;
1418 trace_sk_data_ready(sk);
1420 msk = mptcp_sk(parent);
1421 if (state & TCPF_LISTEN) {
1422 /* MPJ subflows are removed from the accept queue before reaching here,
1423 * avoid stray wakeups
1425 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1428 parent->sk_data_ready(parent);
1432 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1433 !subflow->mp_join && !(state & TCPF_CLOSE));
1435 if (mptcp_subflow_data_available(sk))
1436 mptcp_data_ready(parent, sk);
1437 else if (unlikely(sk->sk_err))
1438 subflow_error_report(sk);
1441 static void subflow_write_space(struct sock *ssk)
1443 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1445 mptcp_propagate_sndbuf(sk, ssk);
1446 mptcp_write_space(sk);
1449 static const struct inet_connection_sock_af_ops *
1450 subflow_default_af_ops(struct sock *sk)
1452 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1453 if (sk->sk_family == AF_INET6)
1454 return &subflow_v6_specific;
1456 return &subflow_specific;
1459 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1460 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1462 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1463 struct inet_connection_sock *icsk = inet_csk(sk);
1464 const struct inet_connection_sock_af_ops *target;
1466 target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1468 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1469 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1471 if (likely(icsk->icsk_af_ops == target))
1474 subflow->icsk_af_ops = icsk->icsk_af_ops;
1475 icsk->icsk_af_ops = target;
1479 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1480 struct sockaddr_storage *addr,
1481 unsigned short family)
1483 memset(addr, 0, sizeof(*addr));
1484 addr->ss_family = family;
1485 if (addr->ss_family == AF_INET) {
1486 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1488 if (info->family == AF_INET)
1489 in_addr->sin_addr = info->addr;
1490 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1491 else if (ipv6_addr_v4mapped(&info->addr6))
1492 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1494 in_addr->sin_port = info->port;
1496 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1497 else if (addr->ss_family == AF_INET6) {
1498 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1500 if (info->family == AF_INET)
1501 ipv6_addr_set_v4mapped(info->addr.s_addr,
1502 &in6_addr->sin6_addr);
1504 in6_addr->sin6_addr = info->addr6;
1505 in6_addr->sin6_port = info->port;
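/* __mptcp_subflow_connect() below drives the client side of MP_JOIN: create a
 * kernel TCP socket, pick the local address id and a fresh nonce, inherit
 * keys/token from the msk, bind to the requested address/ifindex and issue a
 * non-blocking connect; handshake completion is then handled by
 * subflow_finish_connect() above.
 */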
1510 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1511 const struct mptcp_addr_info *remote)
1513 struct mptcp_sock *msk = mptcp_sk(sk);
1514 struct mptcp_subflow_context *subflow;
1515 struct sockaddr_storage addr;
1516 int remote_id = remote->id;
1517 int local_id = loc->id;
1518 int err = -ENOTCONN;
1526 if (!mptcp_is_fully_established(sk))
1529 err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1534 subflow = mptcp_subflow_ctx(ssk);
1536 get_random_bytes(&subflow->local_nonce, sizeof(u32));
1537 } while (!subflow->local_nonce);
1540 subflow_set_local_id(subflow, local_id);
1542 mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1544 subflow->remote_key_valid = 1;
1545 subflow->remote_key = msk->remote_key;
1546 subflow->local_key = msk->local_key;
1547 subflow->token = msk->token;
1548 mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1550 addrlen = sizeof(struct sockaddr_in);
1551 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1552 if (addr.ss_family == AF_INET6)
1553 addrlen = sizeof(struct sockaddr_in6);
1555 mptcp_sockopt_sync(msk, ssk);
1557 ssk->sk_bound_dev_if = ifindex;
1558 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1562 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1563 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1564 remote_token, local_id, remote_id);
1565 subflow->remote_token = remote_token;
1566 WRITE_ONCE(subflow->remote_id, remote_id);
1567 subflow->request_join = 1;
1568 subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1569 subflow->subflow_id = msk->subflow_id++;
1570 mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1573 list_add_tail(&subflow->node, &msk->conn_list);
1574 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1575 if (err && err != -EINPROGRESS)
1578 /* discard the subflow socket */
1579 mptcp_sock_graft(ssk, sk->sk_socket);
1580 iput(SOCK_INODE(sf));
1581 WRITE_ONCE(msk->allow_infinite_fallback, false);
1582 mptcp_stop_tout_timer(sk);
1586 list_del(&subflow->node);
1587 sock_put(mptcp_subflow_tcp_sock(subflow));
1590 subflow->disposable = 1;
1594 /* we account subflows before the creation, and these failures will not
1595 * be caught by sk_state_change()
1597 mptcp_pm_close_subflow(msk);
1601 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1603 #ifdef CONFIG_SOCK_CGROUP_DATA
1604 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1605 *child_skcd = &child->sk_cgrp_data;
1607 /* only the additional subflows created by kworkers have to be modified */
1608 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1609 cgroup_id(sock_cgroup_ptr(child_skcd))) {
1611 struct mem_cgroup *memcg = parent->sk_memcg;
1613 mem_cgroup_sk_free(child);
1614 if (memcg && css_tryget(&memcg->css))
1615 child->sk_memcg = memcg;
1616 #endif /* CONFIG_MEMCG */
1618 cgroup_sk_free(child_skcd);
1619 *child_skcd = *parent_skcd;
1620 cgroup_sk_clone(child_skcd);
1622 #endif /* CONFIG_SOCK_CGROUP_DATA */
1625 static void mptcp_subflow_ops_override(struct sock *ssk)
1627 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1628 if (ssk->sk_prot == &tcpv6_prot)
1629 ssk->sk_prot = &tcpv6_prot_override;
1632 ssk->sk_prot = &tcp_prot_override;
1635 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1637 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1638 if (ssk->sk_prot == &tcpv6_prot_override)
1639 ssk->sk_prot = &tcpv6_prot;
1642 ssk->sk_prot = &tcp_prot;
1645 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1646 struct socket **new_sock)
1648 struct mptcp_subflow_context *subflow;
1649 struct net *net = sock_net(sk);
1653 /* un-accepted server sockets can reach here - on bad configuration
1654 * bail early to avoid greater trouble later
1656 if (unlikely(!sk->sk_socket))
1659 err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1663 lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1665 err = security_mptcp_add_subflow(sk, sf->sk);
1669 /* the newly created socket has to be in the same cgroup as its parent */
1670 mptcp_attach_cgroup(sk, sf->sk);
1672 /* kernel sockets do not by default acquire net ref, but TCP timer
1674 * Update ns_tracker to current stack trace and refcounted tracker.
1676 __netns_tracker_free(net, &sf->sk->ns_tracker, false);
1677 sf->sk->sk_net_refcnt = 1;
1678 get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1679 sock_inuse_add(net, 1);
1680 err = tcp_set_ulp(sf->sk, "mptcp");
1683 release_sock(sf->sk);
1690 /* the newly created socket really belongs to the owning MPTCP master
1691 * socket, even if for additional subflows the allocation is performed
1692 * by a kernel workqueue. Adjust inode references, so that the
1693 * procfs/diag interfaces really show this one belonging to the correct
1696 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1697 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1698 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1700 subflow = mptcp_subflow_ctx(sf->sk);
1701 pr_debug("subflow=%p", subflow);
1706 mptcp_subflow_ops_override(sf->sk);
1711 static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
1714 struct inet_connection_sock *icsk = inet_csk(sk);
1715 struct mptcp_subflow_context *ctx;
1717 ctx = kzalloc(sizeof(*ctx), priority);
1721 rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
1722 INIT_LIST_HEAD(&ctx->node);
1723 INIT_LIST_HEAD(&ctx->delegated_node);
1725 pr_debug("subflow=%p", ctx);
1728 WRITE_ONCE(ctx->local_id, -1);
1733 static void __subflow_state_change(struct sock *sk)
1735 struct socket_wq *wq;
1738 wq = rcu_dereference(sk->sk_wq);
1739 if (skwq_has_sleeper(wq))
1740 wake_up_interruptible_all(&wq->wait);
1744 static bool subflow_is_done(const struct sock *sk)
1746 return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
1749 static void subflow_state_change(struct sock *sk)
1751 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1752 struct sock *parent = subflow->conn;
1753 struct mptcp_sock *msk;
1755 __subflow_state_change(sk);
1757 msk = mptcp_sk(parent);
1758 if (subflow_simultaneous_connect(sk)) {
1759 mptcp_do_fallback(sk);
1761 subflow->conn_finished = 1;
1762 mptcp_propagate_state(parent, sk, subflow, NULL);
1765 /* as recvmsg() does not acquire the subflow socket for ssk selection
1766 * a fin packet carrying a DSS can go unnoticed if we don't trigger
1767 * the data available machinery here.
1769 if (mptcp_subflow_data_available(sk))
1770 mptcp_data_ready(parent, sk);
1771 else if (unlikely(sk->sk_err))
1772 subflow_error_report(sk);
1774 subflow_sched_work_if_closed(mptcp_sk(parent), sk);
1776 /* when the fallback subflow closes the rx side, trigger a 'dummy'
1777 * ingress data fin, so that the msk state will follow along
1779 if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1780 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1781 mptcp_schedule_work(parent);
1784 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
1786 struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
1787 struct request_sock *req, *head, *tail;
1788 struct mptcp_subflow_context *subflow;
1789 struct sock *sk, *ssk;
1791 /* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
1792 * Splice the req list, so that accept() can not reach the pending ssk after
1793 * the listener socket is released below.
1795 spin_lock_bh(&queue->rskq_lock);
1796 head = queue->rskq_accept_head;
1797 tail = queue->rskq_accept_tail;
1798 queue->rskq_accept_head = NULL;
1799 queue->rskq_accept_tail = NULL;
1800 spin_unlock_bh(&queue->rskq_lock);
1805 /* can't acquire the msk socket lock under the subflow one,
1806 * or will cause ABBA deadlock
1808 release_sock(listener_ssk);
1810 for (req = head; req; req = req->dl_next) {
1812 if (!sk_is_mptcp(ssk))
1815 subflow = mptcp_subflow_ctx(ssk);
1816 if (!subflow || !subflow->conn)
1822 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1823 __mptcp_unaccepted_force_close(sk);
1826 /* lockdep will report a false positive ABBA deadlock
1827 * between cancel_work_sync and the listener socket.
1828 * The involved locks belong to different sockets WRT
1829 * the existing AB chain.
1830 * Using a per socket key is problematic as key
1831 * deregistration requires process context and must be
1832 * performed at socket disposal time, in atomic context.
1834 * Just tell lockdep to consider the listener socket
1837 mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
1838 mptcp_cancel_work(sk);
1839 mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1844 /* we are still under the listener msk socket lock */
1845 lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
1847 /* restore the listener queue, to let the TCP code clean it up */
1848 spin_lock_bh(&queue->rskq_lock);
1849 WARN_ON_ONCE(queue->rskq_accept_head);
1850 queue->rskq_accept_head = head;
1851 queue->rskq_accept_tail = tail;
1852 spin_unlock_bh(&queue->rskq_lock);
1855 static int subflow_ulp_init(struct sock *sk)
1857 struct inet_connection_sock *icsk = inet_csk(sk);
1858 struct mptcp_subflow_context *ctx;
1859 struct tcp_sock *tp = tcp_sk(sk);
1862 /* disallow attaching ULP to a socket unless it has been
1863 * created with sock_create_kern()
1865 if (!sk->sk_kern_sock) {
1870 ctx = subflow_create_ctx(sk, GFP_KERNEL);
1876 pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
1879 ctx->icsk_af_ops = icsk->icsk_af_ops;
1880 icsk->icsk_af_ops = subflow_default_af_ops(sk);
1881 ctx->tcp_state_change = sk->sk_state_change;
1882 ctx->tcp_error_report = sk->sk_error_report;
1884 WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
1885 WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
1887 sk->sk_data_ready = subflow_data_ready;
1888 sk->sk_write_space = subflow_write_space;
1889 sk->sk_state_change = subflow_state_change;
1890 sk->sk_error_report = subflow_error_report;
1895 static void subflow_ulp_release(struct sock *ssk)
1897 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1898 bool release = true;
1906 /* if the msk has been orphaned, keep the ctx
1907 * alive, will be freed by __mptcp_close_ssk(),
1908 * when the subflow is still unaccepted
1910 release = ctx->disposable || list_empty(&ctx->node);
1912 /* inet_child_forget() does not call sk_state_change(),
1913 * explicitly trigger the socket close machinery
1915 if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
1916 &mptcp_sk(sk)->flags))
1917 mptcp_schedule_work(sk);
1921 mptcp_subflow_ops_undo_override(ssk);
1923 kfree_rcu(ctx, rcu);
1926 static void subflow_ulp_clone(const struct request_sock *req,
1928 const gfp_t priority)
1930 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
1931 struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
1932 struct mptcp_subflow_context *new_ctx;
1934 if (!tcp_rsk(req)->is_mptcp ||
1935 (!subflow_req->mp_capable && !subflow_req->mp_join)) {
1936 subflow_ulp_fallback(newsk, old_ctx);
1940 new_ctx = subflow_create_ctx(newsk, priority);
1942 subflow_ulp_fallback(newsk, old_ctx);
1946 new_ctx->conn_finished = 1;
1947 new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
1948 new_ctx->tcp_state_change = old_ctx->tcp_state_change;
1949 new_ctx->tcp_error_report = old_ctx->tcp_error_report;
1950 new_ctx->rel_write_seq = 1;
1951 new_ctx->tcp_sock = newsk;
1953 if (subflow_req->mp_capable) {
1954 /* see comments in subflow_syn_recv_sock(), MPTCP connection
1955 * is fully established only after we receive the remote key
1957 new_ctx->mp_capable = 1;
1958 new_ctx->local_key = subflow_req->local_key;
1959 new_ctx->token = subflow_req->token;
1960 new_ctx->ssn_offset = subflow_req->ssn_offset;
1961 new_ctx->idsn = subflow_req->idsn;
1963 /* this is the first subflow, id is always 0 */
1964 subflow_set_local_id(new_ctx, 0);
1965 } else if (subflow_req->mp_join) {
1966 new_ctx->ssn_offset = subflow_req->ssn_offset;
1967 new_ctx->mp_join = 1;
1968 new_ctx->fully_established = 1;
1969 new_ctx->remote_key_valid = 1;
1970 new_ctx->backup = subflow_req->backup;
1971 WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
1972 new_ctx->token = subflow_req->token;
1973 new_ctx->thmac = subflow_req->thmac;
1975 /* the subflow req id is valid, fetched via subflow_check_req()
1976 * and subflow_token_join_request()
1978 subflow_set_local_id(new_ctx, subflow_req->local_id);
1982 static void tcp_release_cb_override(struct sock *ssk)
1984 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1987 /* process and clear all the pending actions, but leave the subflow in
1988 * the napi queue. To respect locking, only the same CPU that originated
1989 * the action can touch the list. mptcp_napi_poll will take care of it.
1991 status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
1993 mptcp_subflow_process_delegated(ssk, status);
1995 tcp_release_cb(ssk);
1998 static int tcp_abort_override(struct sock *ssk, int err)
2000 /* closing a listener subflow requires a great deal of care.
2001 * keep it simple and just prevent such an operation
2003 if (inet_sk_state_load(ssk) == TCP_LISTEN)
2006 return tcp_abort(ssk, err);
2009 static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
2011 .owner = THIS_MODULE,
2012 .init = subflow_ulp_init,
2013 .release = subflow_ulp_release,
2014 .clone = subflow_ulp_clone,
2017 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
2019 subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
2021 subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
2022 subflow_ops->obj_size, 0,
2024 SLAB_TYPESAFE_BY_RCU,
2026 if (!subflow_ops->slab)
2032 void __init mptcp_subflow_init(void)
2034 mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
2035 mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
2036 mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
2038 if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
2039 panic("MPTCP: failed to init subflow v4 request sock ops\n");
2041 subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
2042 subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
2043 subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
2045 subflow_specific = ipv4_specific;
2046 subflow_specific.conn_request = subflow_v4_conn_request;
2047 subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
2048 subflow_specific.sk_rx_dst_set = subflow_finish_connect;
2049 subflow_specific.rebuild_header = subflow_rebuild_header;
2051 tcp_prot_override = tcp_prot;
2052 tcp_prot_override.release_cb = tcp_release_cb_override;
2053 tcp_prot_override.diag_destroy = tcp_abort_override;
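/* tcp_prot_override (and the v6 twin below) diverges from plain TCP only in
 * release_cb(), to flush MPTCP delegated actions, and in diag_destroy(), to
 * refuse tcp_abort() on listener subflows.
 */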
2055 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2056 /* In struct mptcp_subflow_request_sock, we assume the TCP request sock
2057 * structures for v4 and v6 have the same size. It should not change in
2058 * the future, but better to make sure we are warned if that is no longer
2061 BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
2063 mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
2064 mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
2065 mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
2067 if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
2068 panic("MPTCP: failed to init subflow v6 request sock ops\n");
2070 subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
2071 subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
2072 subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
2074 subflow_v6_specific = ipv6_specific;
2075 subflow_v6_specific.conn_request = subflow_v6_conn_request;
2076 subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
2077 subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
2078 subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
2080 subflow_v6m_specific = subflow_v6_specific;
2081 subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
2082 subflow_v6m_specific.send_check = ipv4_specific.send_check;
2083 subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
2084 subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
2085 subflow_v6m_specific.net_frag_header_len = 0;
2086 subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
2088 tcpv6_prot_override = tcpv6_prot;
2089 tcpv6_prot_override.release_cb = tcp_release_cb_override;
2090 tcpv6_prot_override.diag_destroy = tcp_abort_override;
2093 mptcp_diag_subflow_init(&subflow_ulp_ops);
2095 if (tcp_register_ulp(&subflow_ulp_ops) != 0)
2096 panic("MPTCP: failed to register subflows to ULP\n");