// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

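/* A note on the construction above: per RFC 8684 the MP_JOIN HMAC uses
 * the two keys as the HMAC key (key1 || key2) and the two nonces as the
 * message (nonce1 || nonce2); each peer swaps the key/nonce order when
 * authenticating. Only a truncated part of the SHA-256 output goes on
 * the wire: 64 bits in the SYN-ACK, 160 bits in the third ACK.
 */
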
static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
	return msk;
}

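/* Illustrative recap of the join lookup: the 32-bit token carried in
 * the MP_JOIN SYN was derived by the peer from the key exchanged at
 * MP_CAPABLE time (the most significant 32 bits of SHA-256(key), per
 * RFC 8684), so mptcp_token_get_sock() maps the SYN back to the owning
 * msk; thmac keeps only the leftmost 64 bits of the HMAC, which is all
 * the SYN-ACK option can carry.
 */
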
static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	return 0;
}

static void subflow_init_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int ret;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

	ret = __subflow_init_req(req, sk_listener);
	if (ret)
		return;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (mp_opt.mp_join)
			return;
	} else if (mp_opt.mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (mp_opt.mp_capable && listener->request_mptcp) {
		int err, retries = 4;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
			} else {
				subflow_req->mp_capable = 1;
			}
			return;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;

	} else if (mp_opt.mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req, skb);

		if (unlikely(req->syncookie) && subflow_req->msk) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

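/* Syncookie path: under SYN-flood the request sock is not retained, so
 * the MPTCP state computed in subflow_init_req() is lost and must be
 * rebuilt from the cookie-validated ACK; that is the purpose of the
 * helper below.
 */
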
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	int err;

	err = __subflow_init_req(req, sk_listener);
	if (err)
		return err;

	mptcp_get_options(skb, &mp_opt);

	if (mp_opt.mp_capable && mp_opt.mp_join)
		return -EINVAL;

	if (mp_opt.mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (mp_opt.mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		if (mptcp_can_accept_new_subflow(subflow_req->msk))
			subflow_req->mp_join = 1;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static void subflow_v4_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
				const struct sock *sk_listener,
				struct sk_buff *skb)
{
	tcp_rsk(req)->is_mptcp = 1;

	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);

	subflow_init_req(req, sk_listener, skb);
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

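/* Only 64 bits are compared here because the SYN-ACK option cannot
 * carry more; the full-length (160-bit) HMAC exchanged in the third ACK
 * is verified separately in subflow_hmac_valid().
 */
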
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
		inet_sk_state_store(parent, TCP_ESTABLISHED);
		parent->sk_state_change(parent);
	}

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!mp_opt.mp_capable) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		mptcp_finish_connect(sk);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!mp_opt.mp_join)
			goto do_reset;

		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
			 subflow->thmac, subflow->remote_nonce);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
	}
	return;

do_reset:
	mptcp_subflow_reset(sk);
}

static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

static void subflow_v4_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp_request_sock_ops.destructor(req);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void subflow_v6_req_destructor(struct request_sock *req)
{
	subflow_req_destructor(req);
	tcp6_request_sock_ops.destructor(req);
}
#endif

struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener)
{
	if (ops->family == AF_INET)
		ops = &mptcp_subflow_v4_request_sock_ops;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (ops->family == AF_INET6)
		ops = &mptcp_subflow_v6_request_sock_ops;
#endif

	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
}
EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);

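/* Swapping the ops at allocation time ensures that request socks
 * created for an MPTCP listener are always freed through the
 * subflow-specific destructors above (which drop the msk reference
 * held for MP_JOIN), regardless of the code path (e.g. syncookies)
 * that allocated them.
 */
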
/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

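/* crypto_memneq() is used instead of memcmp() as it runs in constant
 * time, so a peer cannot guess the expected HMAC one byte at a time by
 * measuring timing differences.
 */
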
static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
		sock_orphan(sk);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitimately after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_capable) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal)
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);

			subflow_drop_ctx(child);
			goto out;
		}

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposed to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.mp_capable)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
{
	if ((u32)seq == (u32)old_seq)
		return old_seq;

	/* Assume map covers data not mapped yet. */
	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}

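/* Worked example: with old_seq=0x1fffffff0 and old_data_len=0x20, a
 * 32-bit seq of 0x10 expands to
 * 0x10 | ((0x1fffffff0 + 0x20 + 1) & GENMASK_ULL(63, 32)) = 0x200000010,
 * i.e. the new map is assumed to follow the old one across the 32-bit
 * wrap.
 */
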
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

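/* In other words, the current read offset must fall in the half-open
 * window [map_subflow_seq, map_subflow_seq + map_data_len); mappings
 * starting later or already past would need caching or rewinding,
 * neither of which is supported.
 */
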
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack delivers 0 len FIN pkts to the receive
			 * queue, those are the only 0len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
		 mpext->data_len, mpext->data_fin);

	data_len = mpext->data_len;
	if (data_len == 0) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			} else {
				if (updated && schedule_work(&msk->work))
					sock_hold((struct sock *)msk);

				return MAPPING_DATA_FIN;
			}
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	if (!mpext->dsn64) {
		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
				     mpext->data_seq);
		pr_debug("expanded seq=%llu", subflow->map_seq);
	} else {
		map_seq = mpext->data_seq;
	}
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			return MAPPING_OK;
		}

		/* If this skb's data is fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		return MAPPING_OK;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->mpc_map = mpext->mpc_map;
	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb))
		return MAPPING_INVALID;

	skb_ext_del(skb, SKB_EXT_MPTCP);
	return MAPPING_OK;
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;

	tcp_cleanup_rbuf(ssk, incr);
}

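/* 'fin' adds one to the discard length because a FIN consumes one unit
 * of TCP sequence space; without it an skb carrying a FIN could never
 * be fully consumed and freed.
 */
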
static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
	if (!skb_peek(&ssk->sk_receive_queue))
		subflow->data_avail = 0;
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
		if (status == MAPPING_INVALID) {
			ssk->sk_err = EBADMSG;
			goto fatal;
		}
		if (status == MAPPING_DUMMY) {
			__mptcp_do_fallback(msk);
			skb = skb_peek(&ssk->sk_receive_queue);
			subflow->map_valid = 1;
			subflow->map_seq = READ_ONCE(msk->ack_seq);
			subflow->map_data_len = skb->len;
			subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
						   subflow->ssn_offset;
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			return true;
		}

		if (status != MAPPING_OK)
			return false;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			return false;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map) {
				ssk->sk_err = EBADMSG;
				goto fatal;
			}
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (ack_seq == old_ack) {
			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
			break;
		} else if (after64(ack_seq, old_ack)) {
			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
			break;
		}

		/* only accept in-sequence mapping. Old values are spurious
		 * retransmission
		 */
		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
	}
	return true;

fatal:
	/* fatal protocol error, close the socket */
	/* This barrier is coupled with smp_rmb() in tcp_poll() */
	smp_wmb();
	ssk->sk_error_report(ssk);
	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	subflow->data_avail = 0;
	return false;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		subflow->data_avail = 0;

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as the mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = tcp_space(sk);
	*full_space = tcp_full_space(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflows are removed from the accept queue before
		 * reaching here, avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
}

static void subflow_write_space(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	if (!sk_stream_is_writeable(sk))
		return;

	if (sk_stream_is_writeable(parent)) {
		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
		smp_mb__after_atomic();
		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
		sk_stream_write_space(parent);
	}
}

static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
				struct sockaddr_storage *addr)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = info->family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		in_addr->sin_addr = info->addr;
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (loc->family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = loc->ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr);

	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed;

	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);

	return err;

failed:
	sock_release(sf);
	return err;
}

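/* This is the active MP_JOIN path, typically driven by the path
 * manager: the new subflow inherits the msk keys and token, binds to
 * the chosen local address and connects non-blocking; the handshake
 * completes in subflow_finish_connect() when the SYN-ACK arrives.
 */
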
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * owner
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;

	return 0;
}

static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;

	return ctx;
}

static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
			inet_sk_state_store(parent, TCP_ESTABLISHED);
			parent->sk_state_change(parent);
		}
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
out:
	return err;
}

static void subflow_ulp_release(struct sock *sk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);

	if (!ctx)
		return;

	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if that is no
	 * longer the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}