2 * net/tipc/node.c: TIPC node management routines
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
49 #define INVALID_NODE_SIG 0x10000
50 #define NODE_CLEANUP_AFTER 300000
52 /* Flags used to take different actions according to flag type
53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 * TIPC_NOTIFY_LINK_UP/DOWN: notify link up/down, publish/withdraw link state name
58 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
59 TIPC_NOTIFY_NODE_UP = (1 << 4),
60 TIPC_NOTIFY_LINK_UP = (1 << 6),
61 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
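/* Editorial note with a minimal sketch (not part of the original file):
 * these flags are latched under the node write lock and acted upon by
 * tipc_node_write_unlock() after the lock has been dropped, e.g.:
 *
 *	tipc_node_write_lock(n);
 *	n->action_flags |= TIPC_NOTIFY_LINK_UP;
 *	...
 *	tipc_node_write_unlock(n);	(emits the latched notification)
 */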
64 struct tipc_link_entry {
65 struct tipc_link *link;
66 spinlock_t lock; /* per link */
68 struct sk_buff_head inputq;
69 struct tipc_media_addr maddr;
72 struct tipc_bclink_entry {
73 struct tipc_link *link;
74 struct sk_buff_head inputq1;
75 struct sk_buff_head arrvq;
76 struct sk_buff_head inputq2;
77 struct sk_buff_head namedq;
83 * struct tipc_node - TIPC node structure
84 * @addr: network address of node
85 * @ref: reference counter to node object
86 * @lock: rwlock governing access to structure
87 * @net: the applicable net namespace
88 * @hash: links to adjacent nodes in unsorted hash chain
89 * @inputq: pointer to input queue containing messages for msg event
90 * @namedq: pointer to name table input queue with name table messages
91 * @active_links: bearer ids of active links, used as index into links[] array
92 * @links: array containing references to all links to node
93 * @action_flags: bit mask of different types of node actions
94 * @state: connectivity state vs peer node
95 * @preliminary: whether this is a preliminary node
96 * @sync_point: sequence number where synch/failover is finished
97 * @list: links to adjacent nodes in sorted list of cluster's nodes
98 * @working_links: number of working links to node (both active and standby)
99 * @link_cnt: number of links to node
100 * @capabilities: bitmap, indicating peer node's functional capabilities
101 * @signature: node instance identifier
102 * @link_id: local and remote bearer ids of changing link, if any
103 * @publ_list: list of publications
104 * @rcu: rcu struct for tipc_node
105 * @delete_at: indicates the time for deleting a down node
106 * @crypto_rx: RX crypto handler
113 struct hlist_node hash;
115 struct tipc_link_entry links[MAX_BEARERS];
116 struct tipc_bclink_entry bc_entry;
118 struct list_head list;
129 char peer_id_string[NODE_ID_STR_LEN];
130 struct list_head publ_list;
131 struct list_head conn_sks;
132 unsigned long keepalive_intv;
133 struct timer_list timer;
135 unsigned long delete_at;
136 struct net *peer_net;
138 #ifdef CONFIG_TIPC_CRYPTO
139 struct tipc_crypto *crypto_rx;
143 /* Node FSM states and events:
146 SELF_DOWN_PEER_DOWN = 0xdd,
147 SELF_UP_PEER_UP = 0xaa,
148 SELF_DOWN_PEER_LEAVING = 0xd1,
149 SELF_UP_PEER_COMING = 0xac,
150 SELF_COMING_PEER_UP = 0xca,
151 SELF_LEAVING_PEER_DOWN = 0x1d,
152 NODE_FAILINGOVER = 0xf0,
157 SELF_ESTABL_CONTACT_EVT = 0xece,
158 SELF_LOST_CONTACT_EVT = 0x1ce,
159 PEER_ESTABL_CONTACT_EVT = 0x9ece,
160 PEER_LOST_CONTACT_EVT = 0x91ce,
161 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
162 NODE_FAILOVER_END_EVT = 0xfee,
163 NODE_SYNCH_BEGIN_EVT = 0xcbe,
164 NODE_SYNCH_END_EVT = 0xcee
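/* Editorial note: the hex values above appear to be mnemonics (d = down,
 * a = up, c = coming, 1 = leaving, ...e = event). A minimal sketch of the
 * normal establishment sequence, assuming the node lock is held and the
 * state starts as SELF_DOWN_PEER_DOWN (0xdd):
 *
 *	tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);	(-> SELF_UP_PEER_COMING)
 *	tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);	(-> SELF_UP_PEER_UP)
 */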
167 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
168 struct sk_buff_head *xmitq,
169 struct tipc_media_addr **maddr);
170 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
172 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
173 static void tipc_node_delete(struct tipc_node *node);
174 static void tipc_node_timeout(struct timer_list *t);
175 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
176 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
177 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
178 static bool node_is_up(struct tipc_node *n);
179 static void tipc_node_delete_from_list(struct tipc_node *node);
181 struct tipc_sock_conn {
185 struct list_head list;
188 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
190 int bearer_id = n->active_links[sel & 1];
192 if (unlikely(bearer_id == INVALID_BEARER_ID))
195 return n->links[bearer_id].link;
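/* Selection sketch (illustrative, not in the original file): with two
 * links active, slots 0 and 1 may hold different bearer ids, so any
 * per-flow selector spreads unicast traffic deterministically:
 *
 *	struct tipc_link *l = node_active_link(n, selector);	(uses selector & 1)
 */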
198 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
202 unsigned int mtu = MAX_MSG_SIZE;
204 n = tipc_node_find(net, addr);
208 /* Allow MAX_MSG_SIZE when building a connection-oriented message
209 * if both nodes are in the same core network
211 if (n->peer_net && connected) {
216 bearer_id = n->active_links[sel & 1];
217 if (likely(bearer_id != INVALID_BEARER_ID))
218 mtu = n->links[bearer_id].mtu;
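/* Usage sketch (hypothetical caller, not in the original file): query
 * the usable MTU toward a peer before building a message; 'peer',
 * 'portid' and 'dlen' are assumed to exist in the caller's context:
 *
 *	if (dlen > tipc_node_get_mtu(net, peer, portid, false))
 *		return -EMSGSIZE;
 */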
223 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
225 u8 *own_id = tipc_own_id(net);
231 if (addr == tipc_own_addr(net)) {
232 memcpy(id, own_id, TIPC_NODEID_LEN);
235 n = tipc_node_find(net, addr);
239 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
244 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
249 n = tipc_node_find(net, addr);
251 return TIPC_NODE_CAPABILITIES;
252 caps = n->capabilities;
257 u32 tipc_node_get_addr(struct tipc_node *node)
259 return (node) ? node->addr : 0;
262 char *tipc_node_get_id_str(struct tipc_node *node)
264 return node->peer_id_string;
267 #ifdef CONFIG_TIPC_CRYPTO
269 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
270 * Note: node ref counter must be held first!
272 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
274 return (__n) ? __n->crypto_rx : NULL;
277 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
279 return container_of(pos, struct tipc_node, list)->crypto_rx;
282 struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
286 n = tipc_node_find(net, addr);
287 return (n) ? n->crypto_rx : NULL;
291 static void tipc_node_free(struct rcu_head *rp)
293 struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
295 #ifdef CONFIG_TIPC_CRYPTO
296 tipc_crypto_stop(&n->crypto_rx);
301 static void tipc_node_kref_release(struct kref *kref)
303 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
305 kfree(n->bc_entry.link);
306 call_rcu(&n->rcu, tipc_node_free);
309 void tipc_node_put(struct tipc_node *node)
311 kref_put(&node->kref, tipc_node_kref_release);
314 void tipc_node_get(struct tipc_node *node)
316 kref_get(&node->kref);
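/* Reference sketch (illustrative): every successful tipc_node_find()
 * returns with the node's kref held, so a lookup must be paired with
 * tipc_node_put() once the caller is done with the node:
 *
 *	struct tipc_node *n = tipc_node_find(net, addr);
 *
 *	if (n) {
 *		... use n ...
 *		tipc_node_put(n);
 *	}
 */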
320 * tipc_node_find - locate specified node object, if it exists
322 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
324 struct tipc_net *tn = tipc_net(net);
325 struct tipc_node *node;
326 unsigned int thash = tipc_hashfn(addr);
329 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
330 if (node->addr != addr || node->preliminary)
332 if (!kref_get_unless_zero(&node->kref))
340 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
341 * Note: this function is called only when a discovery request failed
342 * to find the node by its 32-bit id, and is not time critical
344 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
346 struct tipc_net *tn = tipc_net(net);
351 list_for_each_entry_rcu(n, &tn->node_list, list) {
352 read_lock_bh(&n->lock);
353 if (!memcmp(id, n->peer_id, 16) &&
354 kref_get_unless_zero(&n->kref))
356 read_unlock_bh(&n->lock);
361 return found ? n : NULL;
364 static void tipc_node_read_lock(struct tipc_node *n)
366 read_lock_bh(&n->lock);
369 static void tipc_node_read_unlock(struct tipc_node *n)
371 read_unlock_bh(&n->lock);
374 static void tipc_node_write_lock(struct tipc_node *n)
376 write_lock_bh(&n->lock);
379 static void tipc_node_write_unlock_fast(struct tipc_node *n)
381 write_unlock_bh(&n->lock);
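/* Editorial note: the "_fast" variant above skips the action-flag
 * processing done by tipc_node_write_unlock() below, so it is only
 * safe when the caller knows no notification flags were set while
 * the lock was held.
 */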
384 static void tipc_node_write_unlock(struct tipc_node *n)
386 struct net *net = n->net;
388 u32 flags = n->action_flags;
391 struct list_head *publ_list;
393 if (likely(!flags)) {
394 write_unlock_bh(&n->lock);
399 link_id = n->link_id;
400 bearer_id = link_id & 0xffff;
401 publ_list = &n->publ_list;
403 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
404 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
406 write_unlock_bh(&n->lock);
408 if (flags & TIPC_NOTIFY_NODE_DOWN)
409 tipc_publ_notify(net, publ_list, addr, n->capabilities);
411 if (flags & TIPC_NOTIFY_NODE_UP)
412 tipc_named_node_up(net, addr, n->capabilities);
414 if (flags & TIPC_NOTIFY_LINK_UP) {
415 tipc_mon_peer_up(net, addr, bearer_id);
416 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
417 TIPC_NODE_SCOPE, link_id, link_id);
419 if (flags & TIPC_NOTIFY_LINK_DOWN) {
420 tipc_mon_peer_down(net, addr, bearer_id);
421 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
426 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
428 int net_id = tipc_netid(n->net);
429 struct tipc_net *tn_peer;
436 for_each_net_rcu(tmp) {
437 tn_peer = tipc_net(tmp);
440 /* Integrity check: make sure the node really exists in this namespace */
441 if (tn_peer->net_id != net_id)
443 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
445 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
446 if (hash_mixes ^ hash_chk)
449 n->peer_hash_mix = hash_mixes;
454 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
455 u16 capabilities, u32 hash_mixes,
458 struct tipc_net *tn = net_generic(net, tipc_net_id);
459 struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
460 struct tipc_node *n, *temp_node;
465 spin_lock_bh(&tn->node_list_lock);
466 n = tipc_node_find(net, addr) ?:
467 tipc_node_find_by_id(net, peer_id);
473 /* A preliminary node becomes "real" now, refresh its data */
474 tipc_node_write_lock(n);
475 if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
476 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
477 n->capabilities, &n->bc_entry.inputq1,
478 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
479 pr_warn("Broadcast rcv link refresh failed, no memory\n");
480 tipc_node_write_unlock_fast(n);
485 n->preliminary = false;
487 hlist_del_rcu(&n->hash);
488 hlist_add_head_rcu(&n->hash,
489 &tn->node_htable[tipc_hashfn(addr)]);
490 list_del_rcu(&n->list);
491 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
492 if (n->addr < temp_node->addr)
495 list_add_tail_rcu(&n->list, &temp_node->list);
496 tipc_node_write_unlock_fast(n);
499 if (n->peer_hash_mix ^ hash_mixes)
500 tipc_node_assign_peer_net(n, hash_mixes);
501 if (n->capabilities == capabilities)
503 /* Same node may come back with new capabilities */
504 tipc_node_write_lock(n);
505 n->capabilities = capabilities;
506 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
507 l = n->links[bearer_id].link;
509 tipc_link_update_caps(l, capabilities);
511 tipc_node_write_unlock_fast(n);
513 /* Calculate cluster capabilities */
514 tn->capabilities = TIPC_NODE_CAPABILITIES;
515 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
516 tn->capabilities &= temp_node->capabilities;
519 tipc_bcast_toggle_rcast(net,
520 (tn->capabilities & TIPC_BCAST_RCAST));
524 n = kzalloc(sizeof(*n), GFP_ATOMIC);
526 pr_warn("Node creation failed, no memory\n");
529 tipc_nodeid2string(n->peer_id_string, peer_id);
530 #ifdef CONFIG_TIPC_CRYPTO
531 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
532 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
539 n->preliminary = preliminary;
540 memcpy(&n->peer_id, peer_id, 16);
543 n->peer_hash_mix = 0;
544 /* Assign the kernel-local peer namespace, if one exists */
545 tipc_node_assign_peer_net(n, hash_mixes);
546 n->capabilities = capabilities;
548 rwlock_init(&n->lock);
549 INIT_HLIST_NODE(&n->hash);
550 INIT_LIST_HEAD(&n->list);
551 INIT_LIST_HEAD(&n->publ_list);
552 INIT_LIST_HEAD(&n->conn_sks);
553 skb_queue_head_init(&n->bc_entry.namedq);
554 skb_queue_head_init(&n->bc_entry.inputq1);
555 __skb_queue_head_init(&n->bc_entry.arrvq);
556 skb_queue_head_init(&n->bc_entry.inputq2);
557 for (i = 0; i < MAX_BEARERS; i++)
558 spin_lock_init(&n->links[i].lock);
559 n->state = SELF_DOWN_PEER_LEAVING;
560 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
561 n->signature = INVALID_NODE_SIG;
562 n->active_links[0] = INVALID_BEARER_ID;
563 n->active_links[1] = INVALID_BEARER_ID;
565 !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
566 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
567 n->capabilities, &n->bc_entry.inputq1,
568 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
569 pr_warn("Broadcast rcv link creation failed, no memory\n");
575 timer_setup(&n->timer, tipc_node_timeout, 0);
576 /* Start a slow timer anyway, crypto needs it */
577 n->keepalive_intv = 10000;
578 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
579 if (!mod_timer(&n->timer, intv))
581 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
582 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
583 if (n->addr < temp_node->addr)
586 list_add_tail_rcu(&n->list, &temp_node->list);
587 /* Calculate cluster capabilities */
588 tn->capabilities = TIPC_NODE_CAPABILITIES;
589 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
590 tn->capabilities &= temp_node->capabilities;
592 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
593 trace_tipc_node_create(n, true, " ");
595 spin_unlock_bh(&tn->node_list_lock);
599 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
601 unsigned long tol = tipc_link_tolerance(l);
602 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
604 /* Link with lowest tolerance determines timer interval */
605 if (intv < n->keepalive_intv)
606 n->keepalive_intv = intv;
608 /* Ensure link's abort limit corresponds to current tolerance */
609 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
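/* Worked example (editorial): with a link tolerance of 1500 ms,
 * intv = min(1500 / 4, 500) = 375 ms. If that is the lowest value
 * among the node's links, keepalive_intv becomes 375 and the abort
 * limit is set to 1500 / 375 = 4 tolerated silent intervals.
 */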
612 static void tipc_node_delete_from_list(struct tipc_node *node)
614 #ifdef CONFIG_TIPC_CRYPTO
615 tipc_crypto_key_flush(node->crypto_rx);
617 list_del_rcu(&node->list);
618 hlist_del_rcu(&node->hash);
622 static void tipc_node_delete(struct tipc_node *node)
624 trace_tipc_node_delete(node, true, " ");
625 tipc_node_delete_from_list(node);
627 del_timer_sync(&node->timer);
631 void tipc_node_stop(struct net *net)
633 struct tipc_net *tn = tipc_net(net);
634 struct tipc_node *node, *t_node;
636 spin_lock_bh(&tn->node_list_lock);
637 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
638 tipc_node_delete(node);
639 spin_unlock_bh(&tn->node_list_lock);
642 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
646 if (in_own_node(net, addr))
649 n = tipc_node_find(net, addr);
651 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
654 tipc_node_write_lock(n);
655 list_add_tail(subscr, &n->publ_list);
656 tipc_node_write_unlock_fast(n);
660 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
664 if (in_own_node(net, addr))
667 n = tipc_node_find(net, addr);
669 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
672 tipc_node_write_lock(n);
673 list_del_init(subscr);
674 tipc_node_write_unlock_fast(n);
678 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
680 struct tipc_node *node;
681 struct tipc_sock_conn *conn;
684 if (in_own_node(net, dnode))
687 node = tipc_node_find(net, dnode);
689 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
690 return -EHOSTUNREACH;
692 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
697 conn->peer_node = dnode;
699 conn->peer_port = peer_port;
701 tipc_node_write_lock(node);
702 list_add_tail(&conn->list, &node->conn_sks);
703 tipc_node_write_unlock(node);
709 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
711 struct tipc_node *node;
712 struct tipc_sock_conn *conn, *safe;
714 if (in_own_node(net, dnode))
717 node = tipc_node_find(net, dnode);
721 tipc_node_write_lock(node);
722 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
723 if (port != conn->port)
725 list_del(&conn->list);
728 tipc_node_write_unlock(node);
732 static void tipc_node_clear_links(struct tipc_node *node)
736 for (i = 0; i < MAX_BEARERS; i++) {
737 struct tipc_link_entry *le = &node->links[i];
747 /* tipc_node_cleanup - delete nodes that have not had
748 * active links for NODE_CLEANUP_AFTER time
750 static bool tipc_node_cleanup(struct tipc_node *peer)
752 struct tipc_node *temp_node;
753 struct tipc_net *tn = tipc_net(peer->net);
754 bool deleted = false;
756 /* If lock held by tipc_node_stop() the node will be deleted anyway */
757 if (!spin_trylock_bh(&tn->node_list_lock))
760 tipc_node_write_lock(peer);
762 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
763 tipc_node_clear_links(peer);
764 tipc_node_delete_from_list(peer);
767 tipc_node_write_unlock(peer);
770 spin_unlock_bh(&tn->node_list_lock);
774 /* Calculate cluster capabilities */
775 tn->capabilities = TIPC_NODE_CAPABILITIES;
776 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
777 tn->capabilities &= temp_node->capabilities;
779 tipc_bcast_toggle_rcast(peer->net,
780 (tn->capabilities & TIPC_BCAST_RCAST));
781 spin_unlock_bh(&tn->node_list_lock);
785 /* tipc_node_timeout - handle expiration of node timer
787 static void tipc_node_timeout(struct timer_list *t)
789 struct tipc_node *n = from_timer(n, t, timer);
790 struct tipc_link_entry *le;
791 struct sk_buff_head xmitq;
792 int remains = n->link_cnt;
796 trace_tipc_node_timeout(n, false, " ");
797 if (!node_is_up(n) && tipc_node_cleanup(n)) {
798 /* Remove the timer's reference to this node */
803 #ifdef CONFIG_TIPC_CRYPTO
804 /* Take any crypto key related actions first */
805 tipc_crypto_timeout(n->crypto_rx);
807 __skb_queue_head_init(&xmitq);
809 /* Initialize the node interval to a large value (10 seconds); it is
810 * then recalculated from the lowest link tolerance
812 tipc_node_read_lock(n);
813 n->keepalive_intv = 10000;
814 tipc_node_read_unlock(n);
815 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
816 tipc_node_read_lock(n);
817 le = &n->links[bearer_id];
819 spin_lock_bh(&le->lock);
820 /* Link tolerance may change asynchronously: */
821 tipc_node_calculate_timer(n, le->link);
822 rc = tipc_link_timeout(le->link, &xmitq);
823 spin_unlock_bh(&le->lock);
826 tipc_node_read_unlock(n);
827 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
828 if (rc & TIPC_LINK_DOWN_EVT)
829 tipc_node_link_down(n, bearer_id, false);
831 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
835 * __tipc_node_link_up - handle addition of link
836 * Node lock must be held by caller
837 * Link becomes active (alone or shared) or standby, depending on its priority.
839 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
840 struct sk_buff_head *xmitq)
842 int *slot0 = &n->active_links[0];
843 int *slot1 = &n->active_links[1];
844 struct tipc_link *ol = node_active_link(n, 0);
845 struct tipc_link *nl = n->links[bearer_id].link;
847 if (!nl || tipc_link_is_up(nl))
850 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
851 if (!tipc_link_is_up(nl))
855 n->action_flags |= TIPC_NOTIFY_LINK_UP;
856 n->link_id = tipc_link_id(nl);
858 /* Leave room for tunnel header when returning 'mtu' to users: */
859 n->links[bearer_id].mtu = tipc_link_mss(nl);
861 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
862 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
864 pr_debug("Established link <%s> on network plane %c\n",
865 tipc_link_name(nl), tipc_link_plane(nl));
866 trace_tipc_node_link_up(n, true, " ");
868 /* Ensure that a STATE message goes first */
869 tipc_link_build_state_msg(nl, xmitq);
871 /* First link? => give it both slots */
875 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
876 n->action_flags |= TIPC_NOTIFY_NODE_UP;
877 tipc_link_set_active(nl, true);
878 tipc_bcast_add_peer(n->net, nl, xmitq);
882 /* Second link => redistribute slots */
883 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
884 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
887 tipc_link_set_active(nl, true);
888 tipc_link_set_active(ol, false);
889 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
890 tipc_link_set_active(nl, true);
893 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
896 /* Prepare synchronization with first link */
897 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
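/* Slot summary (editorial, recapping the branches above): a first link
 * takes both active slots; a higher-priority second link takes both
 * slots and demotes the old one to standby; an equal-priority second
 * link shares, each owning one slot; a lower-priority one stays standby.
 */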
901 * tipc_node_link_up - handle addition of link
903 * Link becomes active (alone or shared) or standby, depending on its priority.
905 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
906 struct sk_buff_head *xmitq)
908 struct tipc_media_addr *maddr;
910 tipc_node_write_lock(n);
911 __tipc_node_link_up(n, bearer_id, xmitq);
912 maddr = &n->links[bearer_id].maddr;
913 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
914 tipc_node_write_unlock(n);
918 * tipc_node_link_failover() - start failover in the "half-failover" case
920 * This function is only called in a very special situation where link
921 * failover can be already started on peer node but not on this node.
922 * This can happen when e.g.
923 * 1. Both links <1A-2A>, <1B-2B> down
924 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
925 * disturbance, wrong session, etc.)
927 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
928 * 5. Node 2 starts failover onto link <1B-2B>
930 * ==> Node 1 never starts link/node failover!
932 * @n: tipc node structure
933 * @l: failing-over link peer endpoint (can be NULL)
935 * @xmitq: queue for messages to be transmitted on the tunnel link later
937 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
938 struct tipc_link *tnl,
939 struct sk_buff_head *xmitq)
941 /* Avoid a "self-failover" that can never end */
942 if (!tipc_link_is_up(tnl))
945 /* Don't rush; the failing link may still be in the process of resetting */
946 if (l && !tipc_link_is_reset(l))
949 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
950 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
952 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
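/* Editorial note (inferred rationale): adding (U16_MAX / 2 - 1) pushes
 * the sync point half the sequence-number space ahead of the tunnel
 * link's rcv_nxt, i.e. as far into the future as the mod-2^16
 * comparison allows, so the failover cannot end prematurely.
 */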
953 tipc_link_failover_prepare(l, tnl, xmitq);
956 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
957 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
961 * __tipc_node_link_down - handle loss of link
963 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
964 struct sk_buff_head *xmitq,
965 struct tipc_media_addr **maddr)
967 struct tipc_link_entry *le = &n->links[*bearer_id];
968 int *slot0 = &n->active_links[0];
969 int *slot1 = &n->active_links[1];
970 int i, highest = 0, prio;
971 struct tipc_link *l, *_l, *tnl;
973 l = n->links[*bearer_id].link;
974 if (!l || tipc_link_is_reset(l))
978 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
979 n->link_id = tipc_link_id(l);
981 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
983 pr_debug("Lost link <%s> on network plane %c\n",
984 tipc_link_name(l), tipc_link_plane(l));
986 /* Select new active link if any available */
987 *slot0 = INVALID_BEARER_ID;
988 *slot1 = INVALID_BEARER_ID;
989 for (i = 0; i < MAX_BEARERS; i++) {
990 _l = n->links[i].link;
991 if (!_l || !tipc_link_is_up(_l))
995 prio = tipc_link_prio(_l);
998 if (prio > highest) {
1007 if (!node_is_up(n)) {
1008 if (tipc_link_peer_is_down(l))
1009 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1010 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
1011 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
1012 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1014 tipc_link_build_reset_msg(l, xmitq);
1015 *maddr = &n->links[*bearer_id].maddr;
1016 node_lost_contact(n, &le->inputq);
1017 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1020 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1022 /* There is still a working link => initiate failover */
1023 *bearer_id = n->active_links[0];
1024 tnl = n->links[*bearer_id].link;
1025 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1026 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1027 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
1028 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1029 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
1031 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1032 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1033 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
1034 *maddr = &n->links[*bearer_id].maddr;
1037 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
1039 struct tipc_link_entry *le = &n->links[bearer_id];
1040 struct tipc_media_addr *maddr = NULL;
1041 struct tipc_link *l = le->link;
1042 int old_bearer_id = bearer_id;
1043 struct sk_buff_head xmitq;
1048 __skb_queue_head_init(&xmitq);
1050 tipc_node_write_lock(n);
1051 if (!tipc_link_is_establishing(l)) {
1052 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1054 /* Defuse pending tipc_node_link_up() */
1056 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1063 trace_tipc_node_link_down(n, true, "node link down or deleted!");
1064 tipc_node_write_unlock(n);
1066 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
1067 if (!skb_queue_empty(&xmitq))
1068 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1069 tipc_sk_rcv(n->net, &le->inputq);
1072 static bool node_is_up(struct tipc_node *n)
1074 return n->active_links[0] != INVALID_BEARER_ID;
1077 bool tipc_node_is_up(struct net *net, u32 addr)
1079 struct tipc_node *n;
1080 bool retval = false;
1082 if (in_own_node(net, addr))
1085 n = tipc_node_find(net, addr);
1088 retval = node_is_up(n);
1093 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
1095 struct tipc_node *n;
1097 addr ^= tipc_net(net)->random;
1098 while ((n = tipc_node_find(net, addr))) {
1105 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
1106 * Returns suggested address if any, otherwise 0
1108 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
1110 struct tipc_net *tn = tipc_net(net);
1111 struct tipc_node *n;
1115 /* Suggest new address if some other peer is using this one */
1116 n = tipc_node_find(net, addr);
1118 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
1123 return tipc_node_suggest_addr(net, addr);
1126 /* Suggest previously used address if peer is known */
1127 n = tipc_node_find_by_id(net, id);
1129 sugg_addr = n->addr;
1130 preliminary = n->preliminary;
1136 /* Even this node may be in conflict */
1137 if (tn->trial_addr == addr)
1138 return tipc_node_suggest_addr(net, addr);
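/* Sketch (editorial, with assumed detail): tipc_node_suggest_addr()
 * above perturbs the colliding address with the per-net random value,
 * then probes via tipc_node_find() until an unused address turns up;
 * the loop body elided from this excerpt presumably drops the node
 * reference and steps to the next address:
 *
 *	while ((n = tipc_node_find(net, addr))) {
 *		tipc_node_put(n);
 *		addr++;		(assumed stepping, not shown above)
 *	}
 */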
1143 void tipc_node_check_dest(struct net *net, u32 addr,
1144 u8 *peer_id, struct tipc_bearer *b,
1145 u16 capabilities, u32 signature, u32 hash_mixes,
1146 struct tipc_media_addr *maddr,
1147 bool *respond, bool *dupl_addr)
1149 struct tipc_node *n;
1150 struct tipc_link *l;
1151 struct tipc_link_entry *le;
1152 bool addr_match = false;
1153 bool sign_match = false;
1154 bool link_up = false;
1155 bool accept_addr = false;
1164 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
1169 tipc_node_write_lock(n);
1171 le = &n->links[b->identity];
1173 /* Prepare to validate requesting node's signature and media address */
1175 link_up = l && tipc_link_is_up(l);
1176 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
1177 sign_match = (signature == n->signature);
1179 /* These three flags give us eight permutations: */
1181 if (sign_match && addr_match && link_up) {
1182 /* All is fine. Do nothing. */
1184 /* Peer node is not a container/local namespace */
1185 if (!n->peer_hash_mix)
1186 n->peer_hash_mix = hash_mixes;
1187 } else if (sign_match && addr_match && !link_up) {
1188 /* Respond. The link will come up in due time */
1190 } else if (sign_match && !addr_match && link_up) {
1191 /* Peer has changed i/f address without rebooting.
1192 * If so, the link will reset soon, and the next
1193 * discovery will be accepted. So we can ignore it.
1194 * It may also be a cloned or malicious peer having
1195 * chosen the same node address and signature as an
1197 * Ignore requests until the link goes down, if ever.
1200 } else if (sign_match && !addr_match && !link_up) {
1201 /* Peer link has changed i/f address without rebooting.
1202 * It may also be a cloned or malicious peer; we can't
1203 * distinguish between the two.
1204 * The signature is correct, so we must accept.
1208 } else if (!sign_match && addr_match && link_up) {
1209 /* Peer node rebooted. Two possibilities:
1210 * - Delayed re-discovery; this link endpoint has already
1211 * reset and re-established contact with the peer, before
1212 * receiving a discovery message from that node.
1213 * (The peer happened to receive one from this node first).
1214 * - The peer came back so fast that our side has not
1215 * discovered it yet. Probing from this side will soon
1216 * reset the link, since there can be no working link
1217 * endpoint at the peer end, and the link will re-establish.
1218 * Accept the signature, since it comes from a known peer.
1220 n->signature = signature;
1221 } else if (!sign_match && addr_match && !link_up) {
1222 /* The peer node has rebooted.
1223 * Accept signature, since it is a known peer.
1225 n->signature = signature;
1227 } else if (!sign_match && !addr_match && link_up) {
1228 /* Peer rebooted with new address, or a new/duplicate peer.
1229 * Ignore until the link goes down, if ever.
1232 } else if (!sign_match && !addr_match && !link_up) {
1233 /* Peer rebooted with new address, or it is a new peer.
1234 * Accept signature and address.
1236 n->signature = signature;
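/* Summary table (editorial) of the eight (sign, addr, link_up)
 * permutations handled above:
 *
 *	sign addr up | action
 *	  1    1   1 | all fine, do nothing
 *	  1    1   0 | respond; link comes up in due time
 *	  1    0   1 | ignore until the link goes down
 *	  1    0   0 | accept the new media address
 *	  0    1   1 | peer rebooted; accept new signature
 *	  0    1   0 | peer rebooted; accept new signature, respond
 *	  0    0   1 | ignore until the link goes down
 *	  0    0   0 | new peer; accept signature and address
 */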
1244 /* Now create a new link if one does not already exist */
1246 if (n->link_cnt == 2)
1249 if_name = strchr(b->name, ':') + 1;
1250 get_random_bytes(&session, sizeof(u16));
1251 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1252 b->net_plane, b->mtu, b->priority,
1253 b->min_win, b->max_win, session,
1254 tipc_own_addr(net), addr, peer_id,
1256 tipc_bc_sndlink(n->net), n->bc_entry.link,
1258 &n->bc_entry.namedq, &l)) {
1262 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1264 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1265 if (n->state == NODE_FAILINGOVER)
1266 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1269 tipc_node_calculate_timer(n, l);
1270 if (n->link_cnt == 1) {
1271 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
1272 if (!mod_timer(&n->timer, intv))
1276 memcpy(&le->maddr, maddr, sizeof(*maddr));
1278 tipc_node_write_unlock(n);
1279 if (reset && l && !tipc_link_is_reset(l))
1280 tipc_node_link_down(n, b->identity, false);
1284 void tipc_node_delete_links(struct net *net, int bearer_id)
1286 struct tipc_net *tn = net_generic(net, tipc_net_id);
1287 struct tipc_node *n;
1290 list_for_each_entry_rcu(n, &tn->node_list, list) {
1291 tipc_node_link_down(n, bearer_id, true);
1296 static void tipc_node_reset_links(struct tipc_node *n)
1300 pr_warn("Resetting all links to %x\n", n->addr);
1302 trace_tipc_node_reset_links(n, true, " ");
1303 for (i = 0; i < MAX_BEARERS; i++) {
1304 tipc_node_link_down(n, i, false);
1308 /* tipc_node_fsm_evt - node finite state machine
1309 * Determines when contact is allowed with peer node
1311 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1313 int state = n->state;
1316 case SELF_DOWN_PEER_DOWN:
1318 case SELF_ESTABL_CONTACT_EVT:
1319 state = SELF_UP_PEER_COMING;
1321 case PEER_ESTABL_CONTACT_EVT:
1322 state = SELF_COMING_PEER_UP;
1324 case SELF_LOST_CONTACT_EVT:
1325 case PEER_LOST_CONTACT_EVT:
1327 case NODE_SYNCH_END_EVT:
1328 case NODE_SYNCH_BEGIN_EVT:
1329 case NODE_FAILOVER_BEGIN_EVT:
1330 case NODE_FAILOVER_END_EVT:
1335 case SELF_UP_PEER_UP:
1337 case SELF_LOST_CONTACT_EVT:
1338 state = SELF_DOWN_PEER_LEAVING;
1340 case PEER_LOST_CONTACT_EVT:
1341 state = SELF_LEAVING_PEER_DOWN;
1343 case NODE_SYNCH_BEGIN_EVT:
1344 state = NODE_SYNCHING;
1346 case NODE_FAILOVER_BEGIN_EVT:
1347 state = NODE_FAILINGOVER;
1349 case SELF_ESTABL_CONTACT_EVT:
1350 case PEER_ESTABL_CONTACT_EVT:
1351 case NODE_SYNCH_END_EVT:
1352 case NODE_FAILOVER_END_EVT:
1358 case SELF_DOWN_PEER_LEAVING:
1360 case PEER_LOST_CONTACT_EVT:
1361 state = SELF_DOWN_PEER_DOWN;
1363 case SELF_ESTABL_CONTACT_EVT:
1364 case PEER_ESTABL_CONTACT_EVT:
1365 case SELF_LOST_CONTACT_EVT:
1367 case NODE_SYNCH_END_EVT:
1368 case NODE_SYNCH_BEGIN_EVT:
1369 case NODE_FAILOVER_BEGIN_EVT:
1370 case NODE_FAILOVER_END_EVT:
1375 case SELF_UP_PEER_COMING:
1377 case PEER_ESTABL_CONTACT_EVT:
1378 state = SELF_UP_PEER_UP;
1380 case SELF_LOST_CONTACT_EVT:
1381 state = SELF_DOWN_PEER_DOWN;
1383 case SELF_ESTABL_CONTACT_EVT:
1384 case PEER_LOST_CONTACT_EVT:
1385 case NODE_SYNCH_END_EVT:
1386 case NODE_FAILOVER_BEGIN_EVT:
1388 case NODE_SYNCH_BEGIN_EVT:
1389 case NODE_FAILOVER_END_EVT:
1394 case SELF_COMING_PEER_UP:
1396 case SELF_ESTABL_CONTACT_EVT:
1397 state = SELF_UP_PEER_UP;
1399 case PEER_LOST_CONTACT_EVT:
1400 state = SELF_DOWN_PEER_DOWN;
1402 case SELF_LOST_CONTACT_EVT:
1403 case PEER_ESTABL_CONTACT_EVT:
1405 case NODE_SYNCH_END_EVT:
1406 case NODE_SYNCH_BEGIN_EVT:
1407 case NODE_FAILOVER_BEGIN_EVT:
1408 case NODE_FAILOVER_END_EVT:
1413 case SELF_LEAVING_PEER_DOWN:
1415 case SELF_LOST_CONTACT_EVT:
1416 state = SELF_DOWN_PEER_DOWN;
1418 case SELF_ESTABL_CONTACT_EVT:
1419 case PEER_ESTABL_CONTACT_EVT:
1420 case PEER_LOST_CONTACT_EVT:
1422 case NODE_SYNCH_END_EVT:
1423 case NODE_SYNCH_BEGIN_EVT:
1424 case NODE_FAILOVER_BEGIN_EVT:
1425 case NODE_FAILOVER_END_EVT:
1430 case NODE_FAILINGOVER:
1432 case SELF_LOST_CONTACT_EVT:
1433 state = SELF_DOWN_PEER_LEAVING;
1435 case PEER_LOST_CONTACT_EVT:
1436 state = SELF_LEAVING_PEER_DOWN;
1438 case NODE_FAILOVER_END_EVT:
1439 state = SELF_UP_PEER_UP;
1441 case NODE_FAILOVER_BEGIN_EVT:
1442 case SELF_ESTABL_CONTACT_EVT:
1443 case PEER_ESTABL_CONTACT_EVT:
1445 case NODE_SYNCH_BEGIN_EVT:
1446 case NODE_SYNCH_END_EVT:
1453 case SELF_LOST_CONTACT_EVT:
1454 state = SELF_DOWN_PEER_LEAVING;
1456 case PEER_LOST_CONTACT_EVT:
1457 state = SELF_LEAVING_PEER_DOWN;
1459 case NODE_SYNCH_END_EVT:
1460 state = SELF_UP_PEER_UP;
1462 case NODE_FAILOVER_BEGIN_EVT:
1463 state = NODE_FAILINGOVER;
1465 case NODE_SYNCH_BEGIN_EVT:
1466 case SELF_ESTABL_CONTACT_EVT:
1467 case PEER_ESTABL_CONTACT_EVT:
1469 case NODE_FAILOVER_END_EVT:
1475 pr_err("Unknown node fsm state %x\n", state);
1478 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1483 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1484 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1487 static void node_lost_contact(struct tipc_node *n,
1488 struct sk_buff_head *inputq)
1490 struct tipc_sock_conn *conn, *safe;
1491 struct tipc_link *l;
1492 struct list_head *conns = &n->conn_sks;
1493 struct sk_buff *skb;
1496 pr_debug("Lost contact with %x\n", n->addr);
1497 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1498 trace_tipc_node_lost_contact(n, true, " ");
1500 /* Clean up broadcast state */
1501 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1502 skb_queue_purge(&n->bc_entry.namedq);
1504 /* Abort any ongoing link failover */
1505 for (i = 0; i < MAX_BEARERS; i++) {
1506 l = n->links[i].link;
1508 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1511 /* Notify publications from this node */
1512 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1514 n->peer_hash_mix = 0;
1515 /* Notify sockets connected to node */
1516 list_for_each_entry_safe(conn, safe, conns, list) {
1517 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1518 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1519 conn->peer_node, conn->port,
1520 conn->peer_port, TIPC_ERR_NO_NODE);
1522 skb_queue_tail(inputq, skb);
1523 list_del(&conn->list);
1529 * tipc_node_get_linkname - get the name of a link
1531 * @bearer_id: id of the bearer
1532 * @addr: peer node address
1533 * @linkname: link name output buffer
1535 * Returns 0 on success
1537 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1538 char *linkname, size_t len)
1540 struct tipc_link *link;
1542 struct tipc_node *node = tipc_node_find(net, addr);
1547 if (bearer_id >= MAX_BEARERS)
1550 tipc_node_read_lock(node);
1551 link = node->links[bearer_id].link;
1553 strncpy(linkname, tipc_link_name(link), len);
1556 tipc_node_read_unlock(node);
1558 tipc_node_put(node);
1562 /* Caller should hold node lock for the passed node */
1563 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1566 struct nlattr *attrs;
1568 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1569 NLM_F_MULTI, TIPC_NL_NODE_GET);
1573 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
1577 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1579 if (node_is_up(node))
1580 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1583 nla_nest_end(msg->skb, attrs);
1584 genlmsg_end(msg->skb, hdr);
1589 nla_nest_cancel(msg->skb, attrs);
1591 genlmsg_cancel(msg->skb, hdr);
1596 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
1598 struct tipc_msg *hdr = buf_msg(skb_peek(list));
1599 struct sk_buff_head inputq;
1601 switch (msg_user(hdr)) {
1602 case TIPC_LOW_IMPORTANCE:
1603 case TIPC_MEDIUM_IMPORTANCE:
1604 case TIPC_HIGH_IMPORTANCE:
1605 case TIPC_CRITICAL_IMPORTANCE:
1606 if (msg_connected(hdr) || msg_named(hdr) ||
1608 tipc_loopback_trace(peer_net, list);
1609 spin_lock_init(&list->lock);
1610 tipc_sk_rcv(peer_net, list);
1613 if (msg_mcast(hdr)) {
1614 tipc_loopback_trace(peer_net, list);
1615 skb_queue_head_init(&inputq);
1616 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1617 __skb_queue_purge(list);
1618 skb_queue_purge(&inputq);
1622 case MSG_FRAGMENTER:
1623 if (tipc_msg_assemble(list)) {
1624 tipc_loopback_trace(peer_net, list);
1625 skb_queue_head_init(&inputq);
1626 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1627 __skb_queue_purge(list);
1628 skb_queue_purge(&inputq);
1631 case GROUP_PROTOCOL:
1633 tipc_loopback_trace(peer_net, list);
1634 spin_lock_init(&list->lock);
1635 tipc_sk_rcv(peer_net, list);
1638 case NAME_DISTRIBUTOR:
1639 case TUNNEL_PROTOCOL:
1640 case BCAST_PROTOCOL:
1648 * tipc_node_xmit() is the general link level function for message sending
1649 * @net: the applicable net namespace
1650 * @list: chain of buffers containing message
1651 * @dnode: address of destination node
1652 * @selector: a number used for deterministic link selection
1653 * Consumes the buffer chain.
1654 * Returns 0 on success; otherwise -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or -ENOBUFS
1656 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1657 u32 dnode, int selector)
1659 struct tipc_link_entry *le = NULL;
1660 struct tipc_node *n;
1661 struct sk_buff_head xmitq;
1662 bool node_up = false;
1666 if (in_own_node(net, dnode)) {
1667 tipc_loopback_trace(net, list);
1668 spin_lock_init(&list->lock);
1669 tipc_sk_rcv(net, list);
1673 n = tipc_node_find(net, dnode);
1675 __skb_queue_purge(list);
1676 return -EHOSTUNREACH;
1679 tipc_node_read_lock(n);
1680 node_up = node_is_up(n);
1681 if (node_up && n->peer_net && check_net(n->peer_net)) {
1682 /* xmit inner linux container */
1683 tipc_lxc_xmit(n->peer_net, list);
1684 if (likely(skb_queue_empty(list))) {
1685 tipc_node_read_unlock(n);
1691 bearer_id = n->active_links[selector & 1];
1692 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1693 tipc_node_read_unlock(n);
1695 __skb_queue_purge(list);
1696 return -EHOSTUNREACH;
1699 __skb_queue_head_init(&xmitq);
1700 le = &n->links[bearer_id];
1701 spin_lock_bh(&le->lock);
1702 rc = tipc_link_xmit(le->link, list, &xmitq);
1703 spin_unlock_bh(&le->lock);
1704 tipc_node_read_unlock(n);
1706 if (unlikely(rc == -ENOBUFS))
1707 tipc_node_link_down(n, bearer_id, false);
1709 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1716 /* tipc_node_xmit_skb(): send single buffer to destination
1717 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1718 * messages, which will not be rejected
1719 * The only exception is datagram messages rerouted after secondary
1720 * lookup, which are rare and safe to dispose of anyway.
1722 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1725 struct sk_buff_head head;
1727 __skb_queue_head_init(&head);
1728 __skb_queue_tail(&head, skb);
1729 tipc_node_xmit(net, &head, dnode, selector);
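/* Usage sketch (illustrative, mirroring tipc_node_distr_xmit() below):
 * send one pre-built buffer, using the originating port as selector:
 *
 *	tipc_node_xmit_skb(net, skb, msg_destnode(buf_msg(skb)),
 *			   msg_origport(buf_msg(skb)));
 */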
1733 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1734 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1736 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1738 struct sk_buff *skb;
1739 u32 selector, dnode;
1741 while ((skb = __skb_dequeue(xmitq))) {
1742 selector = msg_origport(buf_msg(skb));
1743 dnode = msg_destnode(buf_msg(skb));
1744 tipc_node_xmit_skb(net, skb, dnode, selector);
1749 void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1751 struct sk_buff_head xmitq;
1752 struct sk_buff *txskb;
1753 struct tipc_node *n;
1757 /* Use broadcast if all nodes support it */
1758 if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1759 __skb_queue_head_init(&xmitq);
1760 __skb_queue_tail(&xmitq, skb);
1761 tipc_bcast_xmit(net, &xmitq, &dummy);
1765 /* Otherwise use legacy replicast method */
1767 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1769 if (in_own_node(net, dst))
1773 txskb = pskb_copy(skb, GFP_ATOMIC);
1776 msg_set_destnode(buf_msg(txskb), dst);
1777 tipc_node_xmit_skb(net, txskb, dst, 0);
1783 static void tipc_node_mcast_rcv(struct tipc_node *n)
1785 struct tipc_bclink_entry *be = &n->bc_entry;
1787 /* 'arrvq' is under inputq2's lock protection */
1788 spin_lock_bh(&be->inputq2.lock);
1789 spin_lock_bh(&be->inputq1.lock);
1790 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1791 spin_unlock_bh(&be->inputq1.lock);
1792 spin_unlock_bh(&be->inputq2.lock);
1793 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
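/* Lock-ordering note (editorial): inputq2's lock is taken before
 * inputq1's so that concurrent callers splice into 'arrvq' in a
 * serialized order; as noted above, 'arrvq' itself is only ever
 * touched under inputq2's lock.
 */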
1796 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1797 int bearer_id, struct sk_buff_head *xmitq)
1799 struct tipc_link *ucl;
1802 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1804 if (rc & TIPC_LINK_DOWN_EVT) {
1805 tipc_node_reset_links(n);
1809 if (!(rc & TIPC_LINK_SND_STATE))
1812 /* If probe message, a STATE response will be sent anyway */
1816 /* Produce a STATE message carrying broadcast NACK */
1817 tipc_node_read_lock(n);
1818 ucl = n->links[bearer_id].link;
1820 tipc_link_build_state_msg(ucl, xmitq);
1821 tipc_node_read_unlock(n);
1825 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1826 * @net: the applicable net namespace
1828 * @bearer_id: id of bearer message arrived on
1830 * Invoked with no locks held.
1832 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1835 struct sk_buff_head xmitq;
1836 struct tipc_bclink_entry *be;
1837 struct tipc_link_entry *le;
1838 struct tipc_msg *hdr = buf_msg(skb);
1839 int usr = msg_user(hdr);
1840 u32 dnode = msg_destnode(hdr);
1841 struct tipc_node *n;
1843 __skb_queue_head_init(&xmitq);
1845 /* If NACK for other node, let rcv link for that node peek into it */
1846 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1847 n = tipc_node_find(net, dnode);
1849 n = tipc_node_find(net, msg_prevnode(hdr));
1855 le = &n->links[bearer_id];
1857 rc = tipc_bcast_rcv(net, be->link, skb);
1859 /* Broadcast ACKs are sent on a unicast link */
1860 if (rc & TIPC_LINK_SND_STATE) {
1861 tipc_node_read_lock(n);
1862 tipc_link_build_state_msg(le->link, &xmitq);
1863 tipc_node_read_unlock(n);
1866 if (!skb_queue_empty(&xmitq))
1867 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1869 if (!skb_queue_empty(&be->inputq1))
1870 tipc_node_mcast_rcv(n);
1872 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1873 if (!skb_queue_empty(&n->bc_entry.namedq))
1874 tipc_named_rcv(net, &n->bc_entry.namedq,
1875 &n->bc_entry.named_rcv_nxt,
1876 &n->bc_entry.named_open);
1878 /* If reassembly or retransmission failure => reset all links to peer */
1879 if (rc & TIPC_LINK_DOWN_EVT)
1880 tipc_node_reset_links(n);
1886 * tipc_node_check_state - check and if necessary update node state
1888 * @bearer_id: identity of bearer delivering the packet
1889 * Returns true if state and msg are ok, otherwise false
1891 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1892 int bearer_id, struct sk_buff_head *xmitq)
1894 struct tipc_msg *hdr = buf_msg(skb);
1895 int usr = msg_user(hdr);
1896 int mtyp = msg_type(hdr);
1897 u16 oseqno = msg_seqno(hdr);
1898 u16 exp_pkts = msg_msgcnt(hdr);
1899 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1900 int state = n->state;
1901 struct tipc_link *l, *tnl, *pl = NULL;
1902 struct tipc_media_addr *maddr;
1905 if (trace_tipc_node_check_state_enabled()) {
1906 trace_tipc_skb_dump(skb, false, "skb for node state check");
1907 trace_tipc_node_check_state(n, true, " ");
1909 l = n->links[bearer_id].link;
1912 rcv_nxt = tipc_link_rcv_nxt(l);
1915 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1918 /* Find parallel link, if any */
1919 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1920 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1921 pl = n->links[pb_id].link;
1926 if (!tipc_link_validate_msg(l, hdr)) {
1927 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1928 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1932 /* Check and update node accessibility if applicable */
1933 if (state == SELF_UP_PEER_COMING) {
1934 if (!tipc_link_is_up(l))
1936 if (!msg_peer_link_is_up(hdr))
1938 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1941 if (state == SELF_DOWN_PEER_LEAVING) {
1942 if (msg_peer_node_is_up(hdr))
1944 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1948 if (state == SELF_LEAVING_PEER_DOWN)
1951 /* Ignore duplicate packets */
1952 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1955 /* Initiate or update failover mode if applicable */
1956 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1957 syncpt = oseqno + exp_pkts - 1;
1958 if (pl && !tipc_link_is_reset(pl)) {
1959 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1960 trace_tipc_node_link_down(n, true,
1961 "node link down <- failover!");
1962 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1963 tipc_link_inputq(l));
1966 /* If the parallel link was already down, and this happened before
1967 * the tunnel link came up, node failover was never started.
1968 * Ensure that a FAILOVER_MSG is sent to get the peer out of
1969 * NODE_FAILINGOVER state; this node must also accept
1970 * TUNNEL_MSGs from the peer.
1972 if (n->state != NODE_FAILINGOVER)
1973 tipc_node_link_failover(n, pl, l, xmitq);
1975 /* If pkts arrive out of order, use lowest calculated syncpt */
1976 if (less(syncpt, n->sync_point))
1977 n->sync_point = syncpt;
1980 /* Open parallel link when tunnel link reaches synch point */
1981 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
1982 if (!more(rcv_nxt, n->sync_point))
1984 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1986 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
1990 /* No synching needed if only one link */
1991 if (!pl || !tipc_link_is_up(pl))
1994 /* Initiate synch mode if applicable */
1995 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
1996 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
1997 syncpt = msg_syncpt(hdr);
1999 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
2000 if (!tipc_link_is_up(l))
2001 __tipc_node_link_up(n, bearer_id, xmitq);
2002 if (n->state == SELF_UP_PEER_UP) {
2003 n->sync_point = syncpt;
2004 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
2005 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
2009 /* Open tunnel link when parallel link reaches synch point */
2010 if (n->state == NODE_SYNCHING) {
2011 if (tipc_link_is_synching(l)) {
2017 inputq_len = skb_queue_len(tipc_link_inputq(pl));
2018 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
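/* Editorial note: dlv_nxt backs rcv_nxt off by the packets still
 * queued toward sockets, i.e. it is the next sequence number actually
 * delivered on the parallel link; synching ends only once delivery
 * has passed the synch point.
 */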
2019 if (more(dlv_nxt, n->sync_point)) {
2020 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2021 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2026 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2028 if (usr == LINK_PROTOCOL)
2036 * tipc_rcv - process TIPC packets/messages arriving from off-node
2037 * @net: the applicable net namespace
2039 * @b: pointer to bearer message arrived on
2041 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2042 * structure (i.e. cannot be NULL), but bearer can be inactive.
2044 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2046 struct sk_buff_head xmitq;
2047 struct tipc_link_entry *le;
2048 struct tipc_msg *hdr;
2049 struct tipc_node *n;
2050 int bearer_id = b->identity;
2051 u32 self = tipc_own_addr(net);
2054 #ifdef CONFIG_TIPC_CRYPTO
2055 struct tipc_ehdr *ehdr;
2057 /* Check if message must be decrypted first */
2058 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2061 ehdr = (struct tipc_ehdr *)skb->data;
2062 if (likely(ehdr->user != LINK_CONFIG)) {
2063 n = tipc_node_find(net, ntohl(ehdr->addr));
2067 n = tipc_node_find_by_id(net, ehdr->id);
2069 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2075 /* Ensure message is well-formed before touching the header */
2076 if (unlikely(!tipc_msg_validate(&skb)))
2078 __skb_queue_head_init(&xmitq);
2080 usr = msg_user(hdr);
2081 bc_ack = msg_bcast_ack(hdr);
2083 /* Handle arrival of discovery or broadcast packet */
2084 if (unlikely(msg_non_seq(hdr))) {
2085 if (unlikely(usr == LINK_CONFIG))
2086 return tipc_disc_rcv(net, skb, b);
2088 return tipc_node_bc_rcv(net, skb, bearer_id);
2091 /* Discard unicast link messages destined for another node */
2092 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2095 /* Locate neighboring node that sent packet */
2096 n = tipc_node_find(net, msg_prevnode(hdr));
2099 le = &n->links[bearer_id];
2101 /* Ensure broadcast reception is in synch with peer's send state */
2102 if (unlikely(usr == LINK_PROTOCOL)) {
2103 if (unlikely(skb_linearize(skb))) {
2108 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2109 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2110 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2113 /* Receive packet directly if conditions permit */
2114 tipc_node_read_lock(n);
2115 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2116 spin_lock_bh(&le->lock);
2118 rc = tipc_link_rcv(le->link, skb, &xmitq);
2121 spin_unlock_bh(&le->lock);
2123 tipc_node_read_unlock(n);
2125 /* Check/update node state before receiving */
2126 if (unlikely(skb)) {
2127 if (unlikely(skb_linearize(skb)))
2129 tipc_node_write_lock(n);
2130 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2132 rc = tipc_link_rcv(le->link, skb, &xmitq);
2136 tipc_node_write_unlock(n);
2139 if (unlikely(rc & TIPC_LINK_UP_EVT))
2140 tipc_node_link_up(n, bearer_id, &xmitq);
2142 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2143 tipc_node_link_down(n, bearer_id, false);
2145 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2146 tipc_named_rcv(net, &n->bc_entry.namedq,
2147 &n->bc_entry.named_rcv_nxt,
2148 &n->bc_entry.named_open);
2150 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2151 tipc_node_mcast_rcv(n);
2153 if (!skb_queue_empty(&le->inputq))
2154 tipc_sk_rcv(net, &le->inputq);
2156 if (!skb_queue_empty(&xmitq))
2157 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2165 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2168 struct tipc_net *tn = tipc_net(net);
2169 int bearer_id = b->identity;
2170 struct sk_buff_head xmitq;
2171 struct tipc_link_entry *e;
2172 struct tipc_node *n;
2174 __skb_queue_head_init(&xmitq);
2178 list_for_each_entry_rcu(n, &tn->node_list, list) {
2179 tipc_node_write_lock(n);
2180 e = &n->links[bearer_id];
2182 if (prop == TIPC_NLA_PROP_TOL)
2183 tipc_link_set_tolerance(e->link, b->tolerance,
2185 else if (prop == TIPC_NLA_PROP_MTU)
2186 tipc_link_set_mtu(e->link, b->mtu);
2188 /* Update MTU for node link entry */
2189 e->mtu = tipc_link_mss(e->link);
2192 tipc_node_write_unlock(n);
2193 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2199 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2201 struct net *net = sock_net(skb->sk);
2202 struct tipc_net *tn = net_generic(net, tipc_net_id);
2203 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2204 struct tipc_node *peer, *temp_node;
2208 /* We identify the peer by its net */
2209 if (!info->attrs[TIPC_NLA_NET])
2212 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2213 info->attrs[TIPC_NLA_NET],
2214 tipc_nl_net_policy, info->extack);
2218 if (!attrs[TIPC_NLA_NET_ADDR])
2221 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2223 if (in_own_node(net, addr))
2226 spin_lock_bh(&tn->node_list_lock);
2227 peer = tipc_node_find(net, addr);
2229 spin_unlock_bh(&tn->node_list_lock);
2233 tipc_node_write_lock(peer);
2234 if (peer->state != SELF_DOWN_PEER_DOWN &&
2235 peer->state != SELF_DOWN_PEER_LEAVING) {
2236 tipc_node_write_unlock(peer);
2241 tipc_node_clear_links(peer);
2242 tipc_node_write_unlock(peer);
2243 tipc_node_delete(peer);
2245 /* Calculate cluster capabilities */
2246 tn->capabilities = TIPC_NODE_CAPABILITIES;
2247 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2248 tn->capabilities &= temp_node->capabilities;
2250 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2253 tipc_node_put(peer);
2254 spin_unlock_bh(&tn->node_list_lock);
2259 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2262 struct net *net = sock_net(skb->sk);
2263 struct tipc_net *tn = net_generic(net, tipc_net_id);
2264 int done = cb->args[0];
2265 int last_addr = cb->args[1];
2266 struct tipc_node *node;
2267 struct tipc_nl_msg msg;
2273 msg.portid = NETLINK_CB(cb->skb).portid;
2274 msg.seq = cb->nlh->nlmsg_seq;
2278 node = tipc_node_find(net, last_addr);
2281 /* We never set seq or call nl_dump_check_consistent();
2282 * this means that setting prev_seq here will cause the
2283 * consistency check to fail in the netlink callback
2284 * handler, resulting in the NLMSG_DONE message having
2285 * the NLM_F_DUMP_INTR flag set if the node state
2286 * changed while we released the lock.
2291 tipc_node_put(node);
2294 list_for_each_entry_rcu(node, &tn->node_list, list) {
2295 if (node->preliminary)
2298 if (node->addr == last_addr)
2304 tipc_node_read_lock(node);
2305 err = __tipc_nl_add_node(&msg, node);
2307 last_addr = node->addr;
2308 tipc_node_read_unlock(node);
2312 tipc_node_read_unlock(node);
2317 cb->args[1] = last_addr;
2323 /* tipc_node_find_by_name - locate owner node of link by link's name
2324 * @net: the applicable net namespace
2325 * @link_name: pointer to link name string
2326 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2328 * Returns pointer to node owning the link, or NULL if no matching link is found.
2330 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2331 const char *link_name,
2332 unsigned int *bearer_id)
2334 struct tipc_net *tn = net_generic(net, tipc_net_id);
2335 struct tipc_link *l;
2336 struct tipc_node *n;
2337 struct tipc_node *found_node = NULL;
2342 list_for_each_entry_rcu(n, &tn->node_list, list) {
2343 tipc_node_read_lock(n);
2344 for (i = 0; i < MAX_BEARERS; i++) {
2345 l = n->links[i].link;
2346 if (l && !strcmp(tipc_link_name(l), link_name)) {
2352 tipc_node_read_unlock(n);
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	unsigned int bearer_id;
	char *name;
	int res = 0;
	int err;

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}

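/* tipc_nl_node_get_link - netlink: return the attributes of a named link,
 * either the broadcast link or a unicast link found via its owner node
 */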
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		unsigned int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}
		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}
		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

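/* tipc_nl_node_reset_link_stats - netlink: zero the statistics of a named
 * link; handles the broadcast send link, broadcast receive links and
 * unicast links
 */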
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;
	int err;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock. Adds the unicast links of one node to an
 * ongoing dump, remembering in *prev_link how far it got if the message
 * buffer fills up.
 */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;
		if (!node->links[i].link)
			continue;
		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;
	return 0;
}

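/* tipc_nl_node_dump_link - netlink dump of all links in the cluster;
 * optionally includes broadcast-receiver links when requested
 */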
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;
			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;
			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}

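/* tipc_nl_node_set_monitor - netlink: set the activation threshold of the
 * cluster connectivity monitor
 */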
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}

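/* __tipc_nl_add_monitor_prop - append the current monitor threshold as a
 * nested TIPC_NLA_MON attribute to an outgoing netlink message
 */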
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}

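/* tipc_nl_node_dump_monitor - netlink dump of monitor state, one message
 * per bearer
 */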
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

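/* tipc_nl_node_dump_monitor_peer - netlink dump of all monitored peers on
 * one bearer, selected by the TIPC_NLA_MON_REF attribute
 */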
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}

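/* Netlink handlers for TIPC-level encryption key management. The helpers
 * below extract and sanity-check the key, node id and rekeying attributes
 * before the keys are attached to the TX/RX crypto instances.
 */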
#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **pkey)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
	struct tipc_aead_key *key;

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;
	key = (struct tipc_aead_key *)nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < tipc_aead_key_size(key))
		return -EINVAL;

	*pkey = key;
	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}

static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];

	if (!attr)
		return -ENODATA;

	*intv = nla_get_u32(attr);
	return 0;
}

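/* __tipc_nl_node_set_key - attach a new cluster, per-node or master key to
 * the proper crypto instance, distribute it when applicable, and schedule
 * TX rekeying if an interval was given
 */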
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}

int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	return 0;
}

int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
#endif

/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *	  - false: dump only tipc node data
 *	  - true: dump node link data as well
 * @buf: buffer into which the formatted dump data is written
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}

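/* tipc_node_pre_cleanup_net - detach any node in other namespaces that
 * still refers to the exiting namespace as its peer
 */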
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}