1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2020, Red Hat, Inc.
7 #define pr_fmt(fmt) "MPTCP: " fmt
9 #include <linux/inet.h>
10 #include <linux/kernel.h>
12 #include <net/inet_common.h>
13 #include <net/netns/generic.h>
14 #include <net/mptcp.h>
15 #include <net/genetlink.h>
16 #include <uapi/linux/mptcp.h>
21 /* forward declaration */
22 static struct genl_family mptcp_genl_family;
24 static int pm_nl_pernet_id;
26 struct mptcp_pm_add_entry {
27 struct list_head list;
28 struct mptcp_addr_info addr;
30 struct timer_list add_timer;
31 struct mptcp_sock *sock;
35 /* protects pernet updates */
37 struct list_head local_addr_list;
39 unsigned int stale_loss_cnt;
40 unsigned int add_addr_signal_max;
41 unsigned int add_addr_accept_max;
42 unsigned int local_addr_max;
43 unsigned int subflows_max;
45 DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
48 #define MPTCP_PM_ADDR_MAX 8
49 #define ADD_ADDR_RETRANS_MAX 3
51 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
53 return net_generic(net, pm_nl_pernet_id);
56 static struct pm_nl_pernet *
57 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
59 return pm_nl_get_pernet(sock_net((struct sock *)msk));
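/* Compare two MPTCP addresses, treating an IPv4 address and its
 * v4-mapped IPv6 form as equal; ports are compared only when
 * 'use_port' is set.
 */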
62 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
63 const struct mptcp_addr_info *b, bool use_port)
65 bool addr_equals = false;
67 if (a->family == b->family) {
68 if (a->family == AF_INET)
69 addr_equals = a->addr.s_addr == b->addr.s_addr;
70 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
72 addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
73 } else if (a->family == AF_INET) {
74 if (ipv6_addr_v4mapped(&b->addr6))
75 addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
76 } else if (b->family == AF_INET) {
77 if (ipv6_addr_v4mapped(&a->addr6))
78 addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
87 return a->port == b->port;
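/* Fill 'addr' with the local address and port bound to the given socket;
 * remote_address() below does the same for the peer address.
 */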
90 void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr)
92 addr->family = skc->skc_family;
93 addr->port = htons(skc->skc_num);
94 if (addr->family == AF_INET)
95 addr->addr.s_addr = skc->skc_rcv_saddr;
96 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
97 else if (addr->family == AF_INET6)
98 addr->addr6 = skc->skc_v6_rcv_saddr;
102 static void remote_address(const struct sock_common *skc,
103 struct mptcp_addr_info *addr)
105 addr->family = skc->skc_family;
106 addr->port = skc->skc_dport;
107 if (addr->family == AF_INET)
108 addr->addr.s_addr = skc->skc_daddr;
109 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
110 else if (addr->family == AF_INET6)
111 addr->addr6 = skc->skc_v6_daddr;
115 static bool lookup_subflow_by_saddr(const struct list_head *list,
116 const struct mptcp_addr_info *saddr)
118 struct mptcp_subflow_context *subflow;
119 struct mptcp_addr_info cur;
120 struct sock_common *skc;
122 list_for_each_entry(subflow, list, node) {
123 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
125 mptcp_local_address(skc, &cur);
126 if (mptcp_addresses_equal(&cur, saddr, saddr->port))
133 static bool lookup_subflow_by_daddr(const struct list_head *list,
134 const struct mptcp_addr_info *daddr)
136 struct mptcp_subflow_context *subflow;
137 struct mptcp_addr_info cur;
138 struct sock_common *skc;
140 list_for_each_entry(subflow, list, node) {
141 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
143 remote_address(skc, &cur);
144 if (mptcp_addresses_equal(&cur, daddr, daddr->port))
151 static struct mptcp_pm_addr_entry *
152 select_local_address(const struct pm_nl_pernet *pernet,
153 const struct mptcp_sock *msk)
155 struct mptcp_pm_addr_entry *entry, *ret = NULL;
157 msk_owned_by_me(msk);
160 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
161 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
164 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
174 static struct mptcp_pm_addr_entry *
175 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
177 struct mptcp_pm_addr_entry *entry, *ret = NULL;
180 /* do not keep any additional per socket state, just signal
181 * the address list in order.
182 * Note: removal from the local address list during the msk life-cycle
183 * can lead to additional addresses not being announced.
185 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
186 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
189 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
199 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
201 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
203 return READ_ONCE(pernet->add_addr_signal_max);
205 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
207 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
209 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
211 return READ_ONCE(pernet->add_addr_accept_max);
213 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
215 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
217 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
219 return READ_ONCE(pernet->subflows_max);
221 EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
223 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
225 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
227 return READ_ONCE(pernet->local_addr_max);
229 EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
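/* Clear the msk 'work_pending' flag once the subflow limit has been
 * reached or no endpoint id is still available for this msk.
 */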
231 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
233 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
235 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
236 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
237 MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
238 WRITE_ONCE(msk->pm.work_pending, false);
244 struct mptcp_pm_add_entry *
245 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
246 const struct mptcp_addr_info *addr)
248 struct mptcp_pm_add_entry *entry;
250 lockdep_assert_held(&msk->pm.lock);
252 list_for_each_entry(entry, &msk->pm.anno_list, list) {
253 if (mptcp_addresses_equal(&entry->addr, addr, true))
260 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
262 struct mptcp_pm_add_entry *entry;
263 struct mptcp_addr_info saddr;
266 mptcp_local_address((struct sock_common *)sk, &saddr);
268 spin_lock_bh(&msk->pm.lock);
269 list_for_each_entry(entry, &msk->pm.anno_list, list) {
270 if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
277 spin_unlock_bh(&msk->pm.lock);
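/* ADD_ADDR retransmission timer: re-send the announce, up to
 * ADD_ADDR_RETRANS_MAX times, while it is still pending.
 */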
281 static void mptcp_pm_add_timer(struct timer_list *timer)
283 struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
284 struct mptcp_sock *msk = entry->sock;
285 struct sock *sk = (struct sock *)msk;
287 pr_debug("msk=%p", msk);
292 if (inet_sk_state_load(sk) == TCP_CLOSE)
298 if (mptcp_pm_should_add_signal_addr(msk)) {
299 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
303 spin_lock_bh(&msk->pm.lock);
305 if (!mptcp_pm_should_add_signal_addr(msk)) {
306 pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
307 mptcp_pm_announce_addr(msk, &entry->addr, false);
308 mptcp_pm_add_addr_send_ack(msk);
309 entry->retrans_times++;
312 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
313 sk_reset_timer(sk, timer,
314 jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
316 spin_unlock_bh(&msk->pm.lock);
318 if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
319 mptcp_pm_subflow_established(msk);
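/* Stop the ADD_ADDR retransmit timer for the given address, optionally
 * matching the address id too, and return the announce entry if found.
 */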
325 struct mptcp_pm_add_entry *
326 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
327 const struct mptcp_addr_info *addr, bool check_id)
329 struct mptcp_pm_add_entry *entry;
330 struct sock *sk = (struct sock *)msk;
332 spin_lock_bh(&msk->pm.lock);
333 entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
334 if (entry && (!check_id || entry->addr.id == addr->id))
335 entry->retrans_times = ADD_ADDR_RETRANS_MAX;
336 spin_unlock_bh(&msk->pm.lock);
338 if (entry && (!check_id || entry->addr.id == addr->id))
339 sk_stop_timer_sync(sk, &entry->add_timer);
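/* Track a new announced address: allocate an announce entry and arm its
 * ADD_ADDR retransmit timer. An address already in the list only gets
 * its timer re-armed (userspace PM case).
 */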
344 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
345 const struct mptcp_addr_info *addr)
347 struct mptcp_pm_add_entry *add_entry = NULL;
348 struct sock *sk = (struct sock *)msk;
349 struct net *net = sock_net(sk);
351 lockdep_assert_held(&msk->pm.lock);
353 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
356 if (mptcp_pm_is_kernel(msk))
359 sk_reset_timer(sk, &add_entry->add_timer,
360 jiffies + mptcp_get_add_addr_timeout(net));
364 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
368 list_add(&add_entry->list, &msk->pm.anno_list);
370 add_entry->addr = *addr;
371 add_entry->sock = msk;
372 add_entry->retrans_times = 0;
374 timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
375 sk_reset_timer(sk, &add_entry->add_timer,
376 jiffies + mptcp_get_add_addr_timeout(net));
381 void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
383 struct mptcp_pm_add_entry *entry, *tmp;
384 struct sock *sk = (struct sock *)msk;
385 LIST_HEAD(free_list);
387 pr_debug("msk=%p", msk);
389 spin_lock_bh(&msk->pm.lock);
390 list_splice_init(&msk->pm.anno_list, &free_list);
391 spin_unlock_bh(&msk->pm.lock);
393 list_for_each_entry_safe(entry, tmp, &free_list, list) {
394 sk_stop_timer_sync(sk, &entry->add_timer);
399 /* Fill all the remote addresses into the array addrs[],
400 * and return the array size.
402 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
403 struct mptcp_addr_info *local,
405 struct mptcp_addr_info *addrs)
407 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
408 struct sock *sk = (struct sock *)msk, *ssk;
409 struct mptcp_subflow_context *subflow;
410 struct mptcp_addr_info remote = { 0 };
411 unsigned int subflows_max;
414 subflows_max = mptcp_pm_get_subflows_max(msk);
415 remote_address((struct sock_common *)sk, &remote);
417 /* Non-fullmesh endpoint, fill in the single entry
418 * corresponding to the primary MPC subflow remote address
424 if (!mptcp_pm_addr_families_match(sk, local, &remote))
430 DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
432 /* Forbid creation of new subflows matching existing
433 * ones, possibly already created by incoming ADD_ADDR
435 bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
436 mptcp_for_each_subflow(msk, subflow)
437 if (READ_ONCE(subflow->local_id) == local->id)
438 __set_bit(subflow->remote_id, unavail_id);
440 mptcp_for_each_subflow(msk, subflow) {
441 ssk = mptcp_subflow_tcp_sock(subflow);
442 remote_address((struct sock_common *)ssk, &addrs[i]);
443 addrs[i].id = READ_ONCE(subflow->remote_id);
444 if (deny_id0 && !addrs[i].id)
447 if (test_bit(addrs[i].id, unavail_id))
450 if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
453 if (msk->pm.subflows < subflows_max) {
454 /* forbid creating multiple subflows towards this address */
457 __set_bit(addrs[i].id, unavail_id);
467 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
468 bool prio, bool backup)
470 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
473 pr_debug("send ack for %s",
474 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
476 slow = lock_sock_fast(ssk);
478 subflow->send_mp_prio = 1;
479 subflow->backup = backup;
480 subflow->request_bkup = backup;
483 __mptcp_subflow_send_ack(ssk);
484 unlock_sock_fast(ssk, slow);
487 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
488 bool prio, bool backup)
490 spin_unlock_bh(&msk->pm.lock);
491 __mptcp_pm_send_ack(msk, subflow, prio, backup);
492 spin_lock_bh(&msk->pm.lock);
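/* Endpoint lookups on the pernet local address list, by id or by
 * address; callers are expected to hold pernet->lock or the RCU read
 * lock.
 */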
495 static struct mptcp_pm_addr_entry *
496 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
498 struct mptcp_pm_addr_entry *entry;
500 list_for_each_entry(entry, &pernet->local_addr_list, list) {
501 if (entry->addr.id == id)
507 static struct mptcp_pm_addr_entry *
508 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
511 struct mptcp_pm_addr_entry *entry;
513 list_for_each_entry(entry, &pernet->local_addr_list, list) {
514 if ((!lookup_by_id &&
515 mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
516 (lookup_by_id && entry->addr.id == info->id))
522 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
524 struct sock *sk = (struct sock *)msk;
525 struct mptcp_pm_addr_entry *local;
526 unsigned int add_addr_signal_max;
527 unsigned int local_addr_max;
528 struct pm_nl_pernet *pernet;
529 unsigned int subflows_max;
531 pernet = pm_nl_get_pernet(sock_net(sk));
533 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
534 local_addr_max = mptcp_pm_get_local_addr_max(msk);
535 subflows_max = mptcp_pm_get_subflows_max(msk);
537 /* do lazy endpoint usage accounting for the MPC subflows */
538 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
539 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
540 struct mptcp_pm_addr_entry *entry;
541 struct mptcp_addr_info mpc_addr;
544 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
546 entry = __lookup_addr(pernet, &mpc_addr, false);
548 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
549 msk->mpc_endpoint_id = entry->addr.id;
550 backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
555 mptcp_pm_send_ack(msk, subflow, true, backup);
557 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
560 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
561 msk->pm.local_addr_used, local_addr_max,
562 msk->pm.add_addr_signaled, add_addr_signal_max,
563 msk->pm.subflows, subflows_max);
565 /* check first for announce */
566 if (msk->pm.add_addr_signaled < add_addr_signal_max) {
567 local = select_signal_address(pernet, msk);
569 /* due to racing events on both ends we can reach here while
570 * a previous add address announce is still running: if we invoke
571 * mptcp_pm_announce_addr() now, it will fail and the
572 * corresponding id will be marked as used.
573 * Instead let the PM machinery reschedule us when the
574 * current address announce is completed.
576 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
580 if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
581 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
582 msk->pm.add_addr_signaled++;
583 mptcp_pm_announce_addr(msk, &local->addr, false);
584 mptcp_pm_nl_addr_send_ack(msk);
589 /* check if we should create a new subflow */
590 while (msk->pm.local_addr_used < local_addr_max &&
591 msk->pm.subflows < subflows_max) {
592 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
596 local = select_local_address(pernet, msk);
600 fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
602 msk->pm.local_addr_used++;
603 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
604 nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
608 spin_unlock_bh(&msk->pm.lock);
609 for (i = 0; i < nr; i++)
610 __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
611 spin_lock_bh(&msk->pm.lock);
613 mptcp_pm_nl_check_work_pending(msk);
616 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
618 mptcp_pm_create_subflow_or_signal_addr(msk);
621 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
623 mptcp_pm_create_subflow_or_signal_addr(msk);
626 /* Fill all the local addresses into the array addrs[],
627 * and return the array size.
629 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
630 struct mptcp_addr_info *remote,
631 struct mptcp_addr_info *addrs)
633 struct sock *sk = (struct sock *)msk;
634 struct mptcp_pm_addr_entry *entry;
635 struct pm_nl_pernet *pernet;
636 unsigned int subflows_max;
639 pernet = pm_nl_get_pernet_from_msk(msk);
640 subflows_max = mptcp_pm_get_subflows_max(msk);
643 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
644 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
647 if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
650 if (msk->pm.subflows < subflows_max) {
652 addrs[i++] = entry->addr;
657 /* If the array is empty, fill in the single
658 * unspecified ('any') local address
661 struct mptcp_addr_info local;
663 memset(&local, 0, sizeof(local));
665 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
666 remote->family == AF_INET6 &&
667 ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
671 if (!mptcp_pm_addr_families_match(sk, &local, remote))
681 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
683 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
684 struct sock *sk = (struct sock *)msk;
685 unsigned int add_addr_accept_max;
686 struct mptcp_addr_info remote;
687 unsigned int subflows_max;
690 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
691 subflows_max = mptcp_pm_get_subflows_max(msk);
693 pr_debug("accepted %d:%d remote family %d",
694 msk->pm.add_addr_accepted, add_addr_accept_max,
695 msk->pm.remote.family);
697 remote = msk->pm.remote;
698 mptcp_pm_announce_addr(msk, &remote, true);
699 mptcp_pm_nl_addr_send_ack(msk);
701 if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
704 /* no port provided with the remote address: reuse the initial subflow (id 0) destination port */
706 remote.port = sk->sk_dport;
708 /* connect to the specified remote address, using whatever
709 * local address the routing configuration will pick.
711 nr = fill_local_addresses_vec(msk, &remote, addrs);
715 msk->pm.add_addr_accepted++;
716 if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
717 msk->pm.subflows >= subflows_max)
718 WRITE_ONCE(msk->pm.accept_addr, false);
720 spin_unlock_bh(&msk->pm.lock);
721 for (i = 0; i < nr; i++)
722 __mptcp_subflow_connect(sk, &addrs[i], &remote);
723 spin_lock_bh(&msk->pm.lock);
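/* Push the pending ADD_ADDR/RM_ADDR signal out with an ack on the first
 * subflow in the conn_list.
 */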
726 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
728 struct mptcp_subflow_context *subflow;
730 msk_owned_by_me(msk);
731 lockdep_assert_held(&msk->pm.lock);
733 if (!mptcp_pm_should_add_signal(msk) &&
734 !mptcp_pm_should_rm_signal(msk))
737 subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
739 mptcp_pm_send_ack(msk, subflow, false, false);
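/* Find the subflow matching the given local (and, if provided, remote)
 * address and send an MP_PRIO update for it; returns 0 when a matching
 * subflow was found.
 */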
742 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
743 struct mptcp_addr_info *addr,
744 struct mptcp_addr_info *rem,
747 struct mptcp_subflow_context *subflow;
749 pr_debug("bkup=%d", bkup);
751 mptcp_for_each_subflow(msk, subflow) {
752 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
753 struct mptcp_addr_info local, remote;
755 mptcp_local_address((struct sock_common *)ssk, &local);
756 if (!mptcp_addresses_equal(&local, addr, addr->port))
759 if (rem && rem->family != AF_UNSPEC) {
760 remote_address((struct sock_common *)ssk, &remote);
761 if (!mptcp_addresses_equal(&remote, rem, rem->port))
765 __mptcp_pm_send_ack(msk, subflow, true, bkup);
772 static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
774 return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
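/* Close the subflows matching the given remove list: RM_ADDR matches on
 * the remote address id, remove-subflow on the local one; MIB counters
 * and PM accounting are updated accordingly.
 */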
777 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
778 const struct mptcp_rm_list *rm_list,
779 enum linux_mptcp_mib_field rm_type)
781 struct mptcp_subflow_context *subflow, *tmp;
782 struct sock *sk = (struct sock *)msk;
785 pr_debug("%s rm_list_nr %d",
786 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
788 msk_owned_by_me(msk);
790 if (sk->sk_state == TCP_LISTEN)
796 if (list_empty(&msk->conn_list))
799 for (i = 0; i < rm_list->nr; i++) {
800 u8 rm_id = rm_list->ids[i];
801 bool removed = false;
803 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
804 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
805 u8 remote_id = READ_ONCE(subflow->remote_id);
806 int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
807 u8 id = subflow_get_local_id(subflow);
809 if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
811 if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
814 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
815 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
816 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
817 spin_unlock_bh(&msk->pm.lock);
818 mptcp_subflow_shutdown(sk, ssk, how);
820 /* the following takes care of updating the subflows counter */
821 mptcp_close_ssk(sk, ssk, subflow);
822 spin_lock_bh(&msk->pm.lock);
825 __MPTCP_INC_STATS(sock_net(sk), rm_type);
827 if (rm_type == MPTCP_MIB_RMSUBFLOW)
828 __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
832 if (!mptcp_pm_is_kernel(msk))
835 if (rm_type == MPTCP_MIB_RMADDR) {
836 msk->pm.add_addr_accepted--;
837 WRITE_ONCE(msk->pm.accept_addr, true);
838 } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
839 msk->pm.local_addr_used--;
844 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
846 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
849 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
850 const struct mptcp_rm_list *rm_list)
852 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
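/* In-kernel PM worker: dispatch the pending pm->status events under the
 * PM spinlock.
 */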
855 void mptcp_pm_nl_work(struct mptcp_sock *msk)
857 struct mptcp_pm_data *pm = &msk->pm;
859 msk_owned_by_me(msk);
861 if (!(pm->status & MPTCP_PM_WORK_MASK))
864 spin_lock_bh(&msk->pm.lock);
866 pr_debug("msk=%p status=%x", msk, pm->status);
867 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
868 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
869 mptcp_pm_nl_add_addr_received(msk);
871 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
872 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
873 mptcp_pm_nl_addr_send_ack(msk);
875 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
876 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
877 mptcp_pm_nl_rm_addr_received(msk);
879 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
880 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
881 mptcp_pm_nl_fully_established(msk);
883 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
884 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
885 mptcp_pm_nl_subflow_established(msk);
888 spin_unlock_bh(&msk->pm.lock);
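/* An explicit port is meaningful only for 'signal'-only endpoints; the
 * caller clears the port for all the other ones.
 */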
891 static bool address_use_port(struct mptcp_pm_addr_entry *entry)
893 return (entry->flags &
894 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
895 MPTCP_PM_ADDR_FLAG_SIGNAL;
898 /* caller must ensure the RCU grace period has already elapsed */
899 static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
902 sock_release(entry->lsk);
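/* Insert a new endpoint into the pernet list, allocating an address id
 * when the caller did not provide one; returns the id in use or a
 * negative error on duplicates or when the limits are exceeded.
 */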
906 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
907 struct mptcp_pm_addr_entry *entry,
910 struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
911 unsigned int addr_max;
914 spin_lock_bh(&pernet->lock);
915 /* to keep the code simple, don't do IDR-like allocation for address ID,
916 * just bail when we exceed limits
918 if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
920 if (pernet->addrs >= MPTCP_PM_ADDR_MAX) {
924 if (test_bit(entry->addr.id, pernet->id_bitmap)) {
929 /* do not insert duplicate address, differentiate on port only
932 if (!address_use_port(entry))
933 entry->addr.port = 0;
934 list_for_each_entry(cur, &pernet->local_addr_list, list) {
935 if (mptcp_addresses_equal(&cur->addr, &entry->addr,
936 cur->addr.port || entry->addr.port)) {
937 /* allow replacing the existing endpoint only if such
938 * endpoint is an implicit one and the user-space
939 * did not provide an endpoint id
941 if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) {
949 entry->addr.id = cur->addr.id;
950 list_del_rcu(&cur->list);
956 if (!entry->addr.id && needs_id) {
958 entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
959 MPTCP_PM_MAX_ADDR_ID + 1,
961 if (!entry->addr.id && pernet->next_id != 1) {
967 if (!entry->addr.id && needs_id)
970 __set_bit(entry->addr.id, pernet->id_bitmap);
971 if (entry->addr.id > pernet->next_id)
972 pernet->next_id = entry->addr.id;
974 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
975 addr_max = pernet->add_addr_signal_max;
976 WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
978 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
979 addr_max = pernet->local_addr_max;
980 WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
984 if (!entry->addr.port)
985 list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
987 list_add_rcu(&entry->list, &pernet->local_addr_list);
988 ret = entry->addr.id;
991 spin_unlock_bh(&pernet->lock);
993 /* just replaced an existing entry, free it */
996 __mptcp_pm_release_addr_entry(del_entry);
1001 static struct lock_class_key mptcp_slock_keys[2];
1002 static struct lock_class_key mptcp_keys[2];
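/* Create, bind and listen on the in-kernel MPTCP socket backing a
 * 'signal' endpoint that specifies a port.
 */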
1004 static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
1005 struct mptcp_pm_addr_entry *entry)
1007 bool is_ipv6 = sk->sk_family == AF_INET6;
1008 int addrlen = sizeof(struct sockaddr_in);
1009 struct sockaddr_storage addr;
1010 struct sock *newsk, *ssk;
1014 err = sock_create_kern(sock_net(sk), entry->addr.family,
1015 SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
1019 newsk = entry->lsk->sk;
1023 /* The subflow socket lock is acquired nested to the msk one
1024 * in several places, even by the TCP stack, and this msk is a kernel
1025 * socket: lockdep complains. Instead of propagating the _nested
1026 * modifiers in several places, re-init the lock class for the msk
1027 * socket to an mptcp specific one.
1029 sock_lock_init_class_and_name(newsk,
1030 is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
1031 &mptcp_slock_keys[is_ipv6],
1032 is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
1033 &mptcp_keys[is_ipv6]);
1036 ssk = __mptcp_nmpc_sk(mptcp_sk(newsk));
1037 release_sock(newsk);
1039 return PTR_ERR(ssk);
1041 mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
1042 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1043 if (entry->addr.family == AF_INET6)
1044 addrlen = sizeof(struct sockaddr_in6);
1046 if (ssk->sk_family == AF_INET)
1047 err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
1048 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1049 else if (ssk->sk_family == AF_INET6)
1050 err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
1055 /* We don't use mptcp_set_state() here because it needs to be called
1056 * under the msk socket lock. For the moment, that will not bring
1057 * anything more than calling inet_sk_state_store(), because the
1058 * old status is known (TCP_CLOSE).
1060 inet_sk_state_store(newsk, TCP_LISTEN);
1062 err = __inet_listen_sk(ssk, backlog);
1064 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
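/* Map a local address to an endpoint id; an address not yet known is
 * appended to the pernet list as an implicit endpoint.
 */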
1069 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
1071 struct mptcp_pm_addr_entry *entry;
1072 struct pm_nl_pernet *pernet;
1075 pernet = pm_nl_get_pernet_from_msk(msk);
1078 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
1079 if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
1080 ret = entry->addr.id;
1088 /* address not found, add to local list */
1089 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1095 entry->addr.port = 0;
1097 entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
1099 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
1106 #define MPTCP_PM_CMD_GRP_OFFSET 0
1107 #define MPTCP_PM_EV_GRP_OFFSET 1
1109 static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
1110 [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, },
1111 [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME,
1112 .flags = GENL_MCAST_CAP_NET_ADMIN,
1116 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
1118 struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
1119 struct sock *sk = (struct sock *)msk;
1120 unsigned int active_max_loss_cnt;
1121 struct net *net = sock_net(sk);
1122 unsigned int stale_loss_cnt;
1125 stale_loss_cnt = mptcp_stale_loss_cnt(net);
1126 if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
1129 /* look for another available subflow not in loss state */
1130 active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
1131 mptcp_for_each_subflow(msk, iter) {
1132 if (iter != subflow && mptcp_subflow_active(iter) &&
1133 iter->stale_count < active_max_loss_cnt) {
1134 /* we have some alternatives, try to mark this subflow as idle ...*/
1135 slow = lock_sock_fast(ssk);
1136 if (!tcp_rtx_and_write_queues_empty(ssk)) {
1138 __mptcp_retransmit_pending_data(sk);
1139 MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
1141 unlock_sock_fast(ssk, slow);
1143 /* always try to push the pending data regardless of re-injections:
1144 * we can possibly use backup subflows now, and subflow selection
1145 * is cheap under the msk socket lock
1147 __mptcp_push_pending(sk, 0);
1153 static int mptcp_pm_family_to_addr(int family)
1155 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1156 if (family == AF_INET6)
1157 return MPTCP_PM_ADDR_ATTR_ADDR6;
1159 return MPTCP_PM_ADDR_ATTR_ADDR4;
1162 static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[],
1163 const struct nlattr *attr,
1164 struct genl_info *info,
1165 struct mptcp_addr_info *addr,
1166 bool require_family)
1171 GENL_SET_ERR_MSG(info, "missing address info");
1175 /* no validation needed - was already done via nested policy */
1176 err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
1177 mptcp_pm_address_nl_policy, info->extack);
1181 if (tb[MPTCP_PM_ADDR_ATTR_ID])
1182 addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
1184 if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
1185 if (!require_family)
1188 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1193 addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
1194 if (addr->family != AF_INET
1195 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1196 && addr->family != AF_INET6
1199 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1200 "unknown address family");
1203 addr_addr = mptcp_pm_family_to_addr(addr->family);
1204 if (!tb[addr_addr]) {
1205 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1206 "missing address data");
1210 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1211 if (addr->family == AF_INET6)
1212 addr->addr6 = nla_get_in6_addr(tb[addr_addr]);
1215 addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]);
1217 if (tb[MPTCP_PM_ADDR_ATTR_PORT])
1218 addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
1223 int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
1224 struct mptcp_addr_info *addr)
1226 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
1228 memset(addr, 0, sizeof(*addr));
1230 return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true);
1233 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
1234 bool require_family,
1235 struct mptcp_pm_addr_entry *entry)
1237 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
1240 memset(entry, 0, sizeof(*entry));
1242 err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family);
1246 if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
1247 u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
1249 entry->ifindex = val;
1252 if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
1253 entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
1255 if (tb[MPTCP_PM_ADDR_ATTR_PORT])
1256 entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
1261 static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
1263 return pm_nl_get_pernet(genl_info_net(info));
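/* Walk all the msks in the netns and let each fully established one
 * re-run the kernel PM, so that a newly added endpoint is used right
 * away.
 */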
1266 static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
1268 struct mptcp_sock *msk;
1269 long s_slot = 0, s_num = 0;
1271 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1272 struct sock *sk = (struct sock *)msk;
1274 if (!READ_ONCE(msk->fully_established) ||
1275 mptcp_pm_is_userspace(msk))
1279 spin_lock_bh(&msk->pm.lock);
1280 mptcp_pm_create_subflow_or_signal_addr(msk);
1281 spin_unlock_bh(&msk->pm.lock);
1292 static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
1293 struct genl_info *info)
1295 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
1297 if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
1298 mptcp_pm_address_nl_policy, info->extack) &&
1299 tb[MPTCP_PM_ADDR_ATTR_ID])
1304 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
1306 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
1307 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1308 struct mptcp_pm_addr_entry addr, *entry;
1311 ret = mptcp_pm_parse_entry(attr, info, true, &addr);
1315 if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
1316 GENL_SET_ERR_MSG(info, "flags must have signal when using port");
1320 if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
1321 addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
1322 GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
1326 if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
1327 GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
1331 entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
1333 GENL_SET_ERR_MSG(info, "can't allocate addr");
1338 if (entry->addr.port) {
1339 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
1341 GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret);
1345 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
1346 !mptcp_pm_has_addr_attr_id(attr, info));
1348 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
1352 mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
1356 __mptcp_pm_release_addr_entry(entry);
1360 int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
1361 u8 *flags, int *ifindex)
1363 struct mptcp_pm_addr_entry *entry;
1364 struct sock *sk = (struct sock *)msk;
1365 struct net *net = sock_net(sk);
1368 entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
1370 *flags = entry->flags;
1371 *ifindex = entry->ifindex;
1378 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
1379 const struct mptcp_addr_info *addr)
1381 struct mptcp_pm_add_entry *entry;
1383 entry = mptcp_pm_del_add_timer(msk, addr, false);
1385 list_del(&entry->list);
1393 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
1394 const struct mptcp_addr_info *addr,
1397 struct mptcp_rm_list list = { .nr = 0 };
1400 list.ids[list.nr++] = addr->id;
1402 ret = remove_anno_list_by_saddr(msk, addr);
1404 spin_lock_bh(&msk->pm.lock);
1405 mptcp_pm_remove_addr(msk, &list);
1406 spin_unlock_bh(&msk->pm.lock);
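/* Remove the given endpoint from every msk in the netns: drop the
 * pending announce, send RM_ADDR and tear down the matching subflows.
 */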
1411 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
1412 const struct mptcp_pm_addr_entry *entry)
1414 const struct mptcp_addr_info *addr = &entry->addr;
1415 struct mptcp_rm_list list = { .nr = 0 };
1416 long s_slot = 0, s_num = 0;
1417 struct mptcp_sock *msk;
1419 pr_debug("remove_id=%d", addr->id);
1421 list.ids[list.nr++] = addr->id;
1423 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1424 struct sock *sk = (struct sock *)msk;
1425 bool remove_subflow;
1427 if (mptcp_pm_is_userspace(msk))
1430 if (list_empty(&msk->conn_list)) {
1431 mptcp_pm_remove_anno_addr(msk, addr, false);
1436 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
1437 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
1438 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
1440 mptcp_pm_remove_subflow(msk, &list);
1451 static int mptcp_nl_remove_id_zero_address(struct net *net,
1452 struct mptcp_addr_info *addr)
1454 struct mptcp_rm_list list = { .nr = 0 };
1455 long s_slot = 0, s_num = 0;
1456 struct mptcp_sock *msk;
1458 list.ids[list.nr++] = 0;
1460 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1461 struct sock *sk = (struct sock *)msk;
1462 struct mptcp_addr_info msk_local;
1464 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1467 mptcp_local_address((struct sock_common *)msk, &msk_local);
1468 if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
1472 spin_lock_bh(&msk->pm.lock);
1473 mptcp_pm_remove_addr(msk, &list);
1474 mptcp_pm_nl_rm_subflow_received(msk, &list);
1475 spin_unlock_bh(&msk->pm.lock);
1486 int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
1488 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
1489 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1490 struct mptcp_pm_addr_entry addr, *entry;
1491 unsigned int addr_max;
1494 ret = mptcp_pm_parse_entry(attr, info, false, &addr);
1498 /* the zero id address is special: the first address used by the msk
1499 * always gets such an id, so different subflows can have different zero
1500 * id addresses. Additionally zero id is not accounted for in id_bitmap.
1501 * Let's use an 'mptcp_rm_list' instead of the common remove code.
1503 if (addr.addr.id == 0)
1504 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);
1506 spin_lock_bh(&pernet->lock);
1507 entry = __lookup_addr_by_id(pernet, addr.addr.id);
1509 GENL_SET_ERR_MSG(info, "address not found");
1510 spin_unlock_bh(&pernet->lock);
1513 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
1514 addr_max = pernet->add_addr_signal_max;
1515 WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
1517 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
1518 addr_max = pernet->local_addr_max;
1519 WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
1523 list_del_rcu(&entry->list);
1524 __clear_bit(entry->addr.id, pernet->id_bitmap);
1525 spin_unlock_bh(&pernet->lock);
1527 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
1529 __mptcp_pm_release_addr_entry(entry);
1534 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
1536 struct mptcp_rm_list alist = { .nr = 0 };
1537 struct mptcp_pm_addr_entry *entry;
1539 list_for_each_entry(entry, rm_list, list) {
1540 if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
1541 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
1542 alist.nr < MPTCP_RM_IDS_MAX)
1543 alist.ids[alist.nr++] = entry->addr.id;
1547 spin_lock_bh(&msk->pm.lock);
1548 mptcp_pm_remove_addr(msk, &alist);
1549 spin_unlock_bh(&msk->pm.lock);
1553 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
1554 struct list_head *rm_list)
1556 struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
1557 struct mptcp_pm_addr_entry *entry;
1559 list_for_each_entry(entry, rm_list, list) {
1560 if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
1561 slist.nr < MPTCP_RM_IDS_MAX)
1562 slist.ids[slist.nr++] = entry->addr.id;
1564 if (remove_anno_list_by_saddr(msk, &entry->addr) &&
1565 alist.nr < MPTCP_RM_IDS_MAX)
1566 alist.ids[alist.nr++] = entry->addr.id;
1570 spin_lock_bh(&msk->pm.lock);
1571 mptcp_pm_remove_addr(msk, &alist);
1572 spin_unlock_bh(&msk->pm.lock);
1575 mptcp_pm_remove_subflow(msk, &slist);
1578 static void mptcp_nl_remove_addrs_list(struct net *net,
1579 struct list_head *rm_list)
1581 long s_slot = 0, s_num = 0;
1582 struct mptcp_sock *msk;
1584 if (list_empty(rm_list))
1587 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1588 struct sock *sk = (struct sock *)msk;
1590 if (!mptcp_pm_is_userspace(msk)) {
1592 mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
1601 /* caller must ensure the RCU grace period has already elapsed */
1602 static void __flush_addrs(struct list_head *list)
1604 while (!list_empty(list)) {
1605 struct mptcp_pm_addr_entry *cur;
1607 cur = list_entry(list->next,
1608 struct mptcp_pm_addr_entry, list);
1609 list_del_rcu(&cur->list);
1610 __mptcp_pm_release_addr_entry(cur);
1614 static void __reset_counters(struct pm_nl_pernet *pernet)
1616 WRITE_ONCE(pernet->add_addr_signal_max, 0);
1617 WRITE_ONCE(pernet->add_addr_accept_max, 0);
1618 WRITE_ONCE(pernet->local_addr_max, 0);
1622 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
1624 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1625 LIST_HEAD(free_list);
1627 spin_lock_bh(&pernet->lock);
1628 list_splice_init(&pernet->local_addr_list, &free_list);
1629 __reset_counters(pernet);
1630 pernet->next_id = 1;
1631 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
1632 spin_unlock_bh(&pernet->lock);
1633 mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
1635 __flush_addrs(&free_list);
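/* Dump a single endpoint as a nested MPTCP_PM_ATTR_ADDR attribute. */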
1639 static int mptcp_nl_fill_addr(struct sk_buff *skb,
1640 struct mptcp_pm_addr_entry *entry)
1642 struct mptcp_addr_info *addr = &entry->addr;
1643 struct nlattr *attr;
1645 attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
1649 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
1650 goto nla_put_failure;
1651 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port)))
1652 goto nla_put_failure;
1653 if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
1654 goto nla_put_failure;
1655 if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
1656 goto nla_put_failure;
1657 if (entry->ifindex &&
1658 nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
1659 goto nla_put_failure;
1661 if (addr->family == AF_INET &&
1662 nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
1664 goto nla_put_failure;
1665 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1666 else if (addr->family == AF_INET6 &&
1667 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
1668 goto nla_put_failure;
1670 nla_nest_end(skb, attr);
1674 nla_nest_cancel(skb, attr);
1678 int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
1680 struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
1681 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1682 struct mptcp_pm_addr_entry addr, *entry;
1683 struct sk_buff *msg;
1687 ret = mptcp_pm_parse_entry(attr, info, false, &addr);
1691 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1695 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
1696 info->genlhdr->cmd);
1698 GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
1703 spin_lock_bh(&pernet->lock);
1704 entry = __lookup_addr_by_id(pernet, addr.addr.id);
1706 GENL_SET_ERR_MSG(info, "address not found");
1711 ret = mptcp_nl_fill_addr(msg, entry);
1715 genlmsg_end(msg, reply);
1716 ret = genlmsg_reply(msg, info);
1717 spin_unlock_bh(&pernet->lock);
1721 spin_unlock_bh(&pernet->lock);
1728 int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
1729 struct netlink_callback *cb)
1731 struct net *net = sock_net(msg->sk);
1732 struct mptcp_pm_addr_entry *entry;
1733 struct pm_nl_pernet *pernet;
1734 int id = cb->args[0];
1738 pernet = pm_nl_get_pernet(net);
1740 spin_lock_bh(&pernet->lock);
1741 for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
1742 if (test_bit(i, pernet->id_bitmap)) {
1743 entry = __lookup_addr_by_id(pernet, i);
1747 if (entry->addr.id <= id)
1750 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
1751 cb->nlh->nlmsg_seq, &mptcp_genl_family,
1752 NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
1756 if (mptcp_nl_fill_addr(msg, entry) < 0) {
1757 genlmsg_cancel(msg, hdr);
1761 id = entry->addr.id;
1762 genlmsg_end(msg, hdr);
1765 spin_unlock_bh(&pernet->lock);
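/* Parse one limit attribute, rejecting values above MPTCP_PM_ADDR_MAX. */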
1771 static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
1773 struct nlattr *attr = info->attrs[id];
1778 *limit = nla_get_u32(attr);
1779 if (*limit > MPTCP_PM_ADDR_MAX) {
1780 GENL_SET_ERR_MSG(info, "limit greater than maximum");
1786 int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
1788 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1789 unsigned int rcv_addrs, subflows;
1792 spin_lock_bh(&pernet->lock);
1793 rcv_addrs = pernet->add_addr_accept_max;
1794 ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
1798 subflows = pernet->subflows_max;
1799 ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
1803 WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
1804 WRITE_ONCE(pernet->subflows_max, subflows);
1807 spin_unlock_bh(&pernet->lock);
1811 int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
1813 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1814 struct sk_buff *msg;
1817 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1821 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
1822 MPTCP_PM_CMD_GET_LIMITS);
1826 if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
1827 READ_ONCE(pernet->add_addr_accept_max)))
1830 if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
1831 READ_ONCE(pernet->subflows_max)))
1834 genlmsg_end(msg, reply);
1835 return genlmsg_reply(msg, info);
1838 GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
1843 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
1844 struct mptcp_addr_info *addr)
1846 struct mptcp_rm_list list = { .nr = 0 };
1848 list.ids[list.nr++] = addr->id;
1850 spin_lock_bh(&msk->pm.lock);
1851 mptcp_pm_nl_rm_subflow_received(msk, &list);
1852 mptcp_pm_create_subflow_or_signal_addr(msk);
1853 spin_unlock_bh(&msk->pm.lock);
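/* Propagate a backup/fullmesh flag change to every established msk in
 * the netns.
 */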
1856 static int mptcp_nl_set_flags(struct net *net,
1857 struct mptcp_addr_info *addr,
1858 u8 bkup, u8 changed)
1860 long s_slot = 0, s_num = 0;
1861 struct mptcp_sock *msk;
1864 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1865 struct sock *sk = (struct sock *)msk;
1867 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1871 if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
1872 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
1873 if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
1874 mptcp_pm_nl_fullmesh(msk, addr);
1885 int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup)
1887 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
1888 u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
1889 MPTCP_PM_ADDR_FLAG_FULLMESH;
1890 struct mptcp_pm_addr_entry *entry;
1891 u8 lookup_by_id = 0;
1893 if (addr->addr.family == AF_UNSPEC) {
1899 spin_lock_bh(&pernet->lock);
1900 entry = __lookup_addr(pernet, &addr->addr, lookup_by_id);
1902 spin_unlock_bh(&pernet->lock);
1905 if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
1906 (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
1907 spin_unlock_bh(&pernet->lock);
1911 changed = (addr->flags ^ entry->flags) & mask;
1912 entry->flags = (entry->flags & ~mask) | (addr->flags & mask);
1914 spin_unlock_bh(&pernet->lock);
1916 mptcp_nl_set_flags(net, &addr->addr, bkup, changed);
1920 int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
1922 struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
1923 struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
1924 struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
1925 struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
1926 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1927 struct net *net = sock_net(skb->sk);
1931 ret = mptcp_pm_parse_entry(attr, info, false, &addr);
1936 ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
1941 if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
1944 return mptcp_pm_set_flags(net, token, &addr, &remote, bkup);
1947 static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
1949 genlmsg_multicast_netns(&mptcp_genl_family, net,
1950 nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
1953 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
1955 return genl_has_listeners(&mptcp_genl_family,
1956 sock_net((const struct sock *)msk),
1957 MPTCP_PM_EV_GRP_OFFSET);
1960 static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
1962 const struct inet_sock *issk = inet_sk(ssk);
1963 const struct mptcp_subflow_context *sf;
1965 if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
1968 switch (ssk->sk_family) {
1970 if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
1972 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
1975 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1977 const struct ipv6_pinfo *np = inet6_sk(ssk);
1979 if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
1981 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
1991 if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
1993 if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
1996 sf = mptcp_subflow_ctx(ssk);
1997 if (WARN_ON_ONCE(!sf))
2000 if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
2003 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
2009 static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
2010 const struct mptcp_sock *msk,
2011 const struct sock *ssk)
2013 const struct sock *sk = (const struct sock *)msk;
2014 const struct mptcp_subflow_context *sf;
2017 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2020 if (mptcp_event_add_subflow(skb, ssk))
2023 sf = mptcp_subflow_ctx(ssk);
2024 if (WARN_ON_ONCE(!sf))
2027 if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
2030 if (ssk->sk_bound_dev_if &&
2031 nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
2034 sk_err = READ_ONCE(ssk->sk_err);
2035 if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
2036 nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
2042 static int mptcp_event_sub_established(struct sk_buff *skb,
2043 const struct mptcp_sock *msk,
2044 const struct sock *ssk)
2046 return mptcp_event_put_token_and_ssk(skb, msk, ssk);
2049 static int mptcp_event_sub_closed(struct sk_buff *skb,
2050 const struct mptcp_sock *msk,
2051 const struct sock *ssk)
2053 const struct mptcp_subflow_context *sf;
2055 if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
2058 sf = mptcp_subflow_ctx(ssk);
2059 if (!sf->reset_seen)
2062 if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
2065 if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
2071 static int mptcp_event_created(struct sk_buff *skb,
2072 const struct mptcp_sock *msk,
2073 const struct sock *ssk)
2075 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
2080 if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
2083 return mptcp_event_add_subflow(skb, ssk);
2086 void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
2088 struct net *net = sock_net((const struct sock *)msk);
2089 struct nlmsghdr *nlh;
2090 struct sk_buff *skb;
2092 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2095 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2099 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
2101 goto nla_put_failure;
2103 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2104 goto nla_put_failure;
2106 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
2107 goto nla_put_failure;
2109 genlmsg_end(skb, nlh);
2110 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
2117 void mptcp_event_addr_announced(const struct sock *ssk,
2118 const struct mptcp_addr_info *info)
2120 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
2121 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2122 struct net *net = sock_net(ssk);
2123 struct nlmsghdr *nlh;
2124 struct sk_buff *skb;
2126 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2129 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2133 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
2134 MPTCP_EVENT_ANNOUNCED);
2136 goto nla_put_failure;
2138 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2139 goto nla_put_failure;
2141 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
2142 goto nla_put_failure;
2144 if (nla_put_be16(skb, MPTCP_ATTR_DPORT,
2146 inet_sk(ssk)->inet_dport :
2148 goto nla_put_failure;
2150 switch (info->family) {
2152 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
2153 goto nla_put_failure;
2155 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2157 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
2158 goto nla_put_failure;
2163 goto nla_put_failure;
2166 genlmsg_end(skb, nlh);
2167 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
2174 void mptcp_event_pm_listener(const struct sock *ssk,
2175 enum mptcp_event_type event)
2177 const struct inet_sock *issk = inet_sk(ssk);
2178 struct net *net = sock_net(ssk);
2179 struct nlmsghdr *nlh;
2180 struct sk_buff *skb;
2182 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2185 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2189 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event);
2191 goto nla_put_failure;
2193 if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
2194 goto nla_put_failure;
2196 if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
2197 goto nla_put_failure;
2199 switch (ssk->sk_family) {
2201 if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
2202 goto nla_put_failure;
2204 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2206 const struct ipv6_pinfo *np = inet6_sk(ssk);
2208 if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
2209 goto nla_put_failure;
2215 goto nla_put_failure;
2218 genlmsg_end(skb, nlh);
2219 mptcp_nl_mcast_send(net, skb, GFP_KERNEL);
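/* Main PM event notification entry point: build the event for the given
 * msk/ssk pair and multicast it to the PM event group, if there are
 * listeners.
 */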
2226 void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
2227 const struct sock *ssk, gfp_t gfp)
2229 struct net *net = sock_net((const struct sock *)msk);
2230 struct nlmsghdr *nlh;
2231 struct sk_buff *skb;
2233 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2236 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
2240 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
2242 goto nla_put_failure;
2245 case MPTCP_EVENT_UNSPEC:
2248 case MPTCP_EVENT_CREATED:
2249 case MPTCP_EVENT_ESTABLISHED:
2250 if (mptcp_event_created(skb, msk, ssk) < 0)
2251 goto nla_put_failure;
2253 case MPTCP_EVENT_CLOSED:
2254 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
2255 goto nla_put_failure;
2257 case MPTCP_EVENT_ANNOUNCED:
2258 case MPTCP_EVENT_REMOVED:
2259 /* call mptcp_event_addr_announced()/removed instead */
2262 case MPTCP_EVENT_SUB_ESTABLISHED:
2263 case MPTCP_EVENT_SUB_PRIORITY:
2264 if (mptcp_event_sub_established(skb, msk, ssk) < 0)
2265 goto nla_put_failure;
2267 case MPTCP_EVENT_SUB_CLOSED:
2268 if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
2269 goto nla_put_failure;
2271 case MPTCP_EVENT_LISTENER_CREATED:
2272 case MPTCP_EVENT_LISTENER_CLOSED:
2276 genlmsg_end(skb, nlh);
2277 mptcp_nl_mcast_send(net, skb, gfp);
2284 static struct genl_family mptcp_genl_family __ro_after_init = {
2285 .name = MPTCP_PM_NAME,
2286 .version = MPTCP_PM_VER,
2288 .module = THIS_MODULE,
2289 .ops = mptcp_pm_nl_ops,
2290 .n_ops = ARRAY_SIZE(mptcp_pm_nl_ops),
2291 .resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1,
2292 .mcgrps = mptcp_pm_mcgrps,
2293 .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
2296 static int __net_init pm_nl_init_net(struct net *net)
2298 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
2300 INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
2302 /* Cit. 2 subflows ought to be enough for anybody. */
2303 pernet->subflows_max = 2;
2304 pernet->next_id = 1;
2305 pernet->stale_loss_cnt = 4;
2306 spin_lock_init(&pernet->lock);
2308 /* No need to initialize other pernet fields, the struct is zeroed at
2315 static void __net_exit pm_nl_exit_net(struct list_head *net_list)
2319 list_for_each_entry(net, net_list, exit_list) {
2320 struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
2322 /* net is removed from namespace list, can't race with
2323 * other modifiers, also netns core already waited for a
2326 __flush_addrs(&pernet->local_addr_list);
2330 static struct pernet_operations mptcp_pm_pernet_ops = {
2331 .init = pm_nl_init_net,
2332 .exit_batch = pm_nl_exit_net,
2333 .id = &pm_nl_pernet_id,
2334 .size = sizeof(struct pm_nl_pernet),
2337 void __init mptcp_pm_nl_init(void)
2339 if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
2340 panic("Failed to register MPTCP PM pernet subsystem.\n");
2342 if (genl_register_family(&mptcp_genl_family))
2343 panic("Failed to register MPTCP PM netlink family\n");