/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000
/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
        TIPC_NOTIFY_LINK_UP             = (1 << 6),
        TIPC_NOTIFY_LINK_DOWN           = (1 << 7)
};
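/* These flags are not acted upon immediately. They are accumulated in
 * node->action_flags while the node write lock is held, and executed from
 * tipc_node_write_unlock() after the lock has been dropped, so that name
 * table and monitor callouts run outside the node lock.
 */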
struct tipc_link_entry {
        struct tipc_link *link;
        spinlock_t lock; /* per link */
        u32 mtu;
        struct sk_buff_head inputq;
        struct tipc_media_addr maddr;
};
struct tipc_bclink_entry {
        struct tipc_link *link;
        struct sk_buff_head inputq1;
        struct sk_buff_head arrvq;
        struct sk_buff_head inputq2;
        struct sk_buff_head namedq;
};
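/* The broadcast receive path is two-staged: packets land in inputq1, are
 * spliced onto arrvq under inputq2's lock, and are then filtered into
 * inputq2 by tipc_sk_mcast_rcv() (see tipc_node_mcast_rcv() below).
 * namedq carries name table distribution messages.
 */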
/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 */
struct tipc_node {
        u32 addr;
        struct kref kref;
        rwlock_t lock;
        struct net *net;
        struct hlist_node hash;
        int active_links[2];
        struct tipc_link_entry links[MAX_BEARERS];
        struct tipc_bclink_entry bc_entry;
        int action_flags;
        struct list_head list;
        int state;
        bool failover_sent;
        u16 sync_point;
        int link_cnt;
        u16 working_links;
        u16 capabilities;
        u32 signature;
        u32 link_id;
        u8 peer_id[16];
        struct list_head publ_list;
        struct list_head conn_sks;
        unsigned long keepalive_intv;
        struct timer_list timer;
        struct rcu_head rcu;
        unsigned long delete_at;
};
/* Node FSM states and events:
 */
enum {
        SELF_DOWN_PEER_DOWN    = 0xdd,
        SELF_UP_PEER_UP        = 0xaa,
        SELF_DOWN_PEER_LEAVING = 0xd1,
        SELF_UP_PEER_COMING    = 0xac,
        SELF_COMING_PEER_UP    = 0xca,
        SELF_LEAVING_PEER_DOWN = 0x1d,
        NODE_FAILINGOVER       = 0xf0,
        NODE_SYNCHING          = 0xcc
};

enum {
        SELF_ESTABL_CONTACT_EVT = 0xece,
        SELF_LOST_CONTACT_EVT   = 0x1ce,
        PEER_ESTABL_CONTACT_EVT = 0x9ece,
        PEER_LOST_CONTACT_EVT   = 0x91ce,
        NODE_FAILOVER_BEGIN_EVT = 0xfbe,
        NODE_FAILOVER_END_EVT   = 0xfee,
        NODE_SYNCH_BEGIN_EVT    = 0xcbe,
        NODE_SYNCH_END_EVT      = 0xcee
};
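/* The hex values appear to double as mnemonics (e.g. 0xdd reads as "self
 * down, peer down", 0xaa as "self up, peer up"), which makes FSM traces
 * easier to read. The state machine itself only compares them symbolically.
 */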
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
                                  struct sk_buff_head *xmitq,
                                  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
                                bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);
struct tipc_sock_conn {
        u32 port;
        u32 peer_port;
        u32 peer_node;
        struct list_head list;
};
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
        int bearer_id = n->active_links[sel & 1];

        if (unlikely(bearer_id == INVALID_BEARER_ID))
                return NULL;

        return n->links[bearer_id].link;
}
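/* active_links[] holds the bearer ids of the currently active link(s).
 * When two links of equal priority are up, the two slots differ and
 * 'sel & 1' spreads traffic deterministically across both; otherwise
 * both slots point at the same bearer.
 */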
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
        struct tipc_node *n;
        int bearer_id;
        unsigned int mtu = MAX_MSG_SIZE;

        n = tipc_node_find(net, addr);
        if (unlikely(!n))
                return mtu;

        bearer_id = n->active_links[sel & 1];
        if (likely(bearer_id != INVALID_BEARER_ID))
                mtu = n->links[bearer_id].mtu;
        tipc_node_put(n);
        return mtu;
}
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
        u8 *own_id = tipc_own_id(net);
        struct tipc_node *n;

        if (!own_id)
                return true;

        if (addr == tipc_own_addr(net)) {
                memcpy(id, own_id, TIPC_NODEID_LEN);
                return true;
        }
        n = tipc_node_find(net, addr);
        if (!n)
                return false;

        memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
        tipc_node_put(n);
        return true;
}
u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
        struct tipc_node *n;
        u16 caps;

        n = tipc_node_find(net, addr);
        if (unlikely(!n))
                return TIPC_NODE_CAPABILITIES;
        caps = n->capabilities;
        tipc_node_put(n);
        return caps;
}
static void tipc_node_kref_release(struct kref *kref)
{
        struct tipc_node *n = container_of(kref, struct tipc_node, kref);

        kfree(n->bc_entry.link);
        kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
        kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
        kref_get(&node->kref);
}
/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
        struct tipc_net *tn = tipc_net(net);
        struct tipc_node *node;
        unsigned int thash = tipc_hashfn(addr);

        rcu_read_lock();
        hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
                if (node->addr != addr)
                        continue;
                if (!kref_get_unless_zero(&node->kref))
                        node = NULL;
                break;
        }
        rcu_read_unlock();
        return node;
}
/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
        struct tipc_net *tn = tipc_net(net);
        struct tipc_node *n;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(n, &tn->node_list, list) {
                read_lock_bh(&n->lock);
                if (!memcmp(id, n->peer_id, 16) &&
                    kref_get_unless_zero(&n->kref))
                        found = true;
                read_unlock_bh(&n->lock);
                if (found)
                        break;
        }
        rcu_read_unlock();
        return found ? n : NULL;
}
static void tipc_node_read_lock(struct tipc_node *n)
{
        read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
        read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
        write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
        write_unlock_bh(&n->lock);
}
static void tipc_node_write_unlock(struct tipc_node *n)
{
        struct net *net = n->net;
        u32 addr = 0;
        u32 flags = n->action_flags;
        u32 link_id = 0;
        u32 bearer_id;
        struct list_head *publ_list;

        if (likely(!flags)) {
                write_unlock_bh(&n->lock);
                return;
        }

        addr = n->addr;
        link_id = n->link_id;
        bearer_id = link_id & 0xffff;
        publ_list = &n->publ_list;

        n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                             TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

        write_unlock_bh(&n->lock);

        if (flags & TIPC_NOTIFY_NODE_DOWN)
                tipc_publ_notify(net, publ_list, addr);

        if (flags & TIPC_NOTIFY_NODE_UP)
                tipc_named_node_up(net, addr);

        if (flags & TIPC_NOTIFY_LINK_UP) {
                tipc_mon_peer_up(net, addr, bearer_id);
                tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
                                     TIPC_NODE_SCOPE, link_id, link_id);
        }
        if (flags & TIPC_NOTIFY_LINK_DOWN) {
                tipc_mon_peer_down(net, addr, bearer_id);
                tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      addr, link_id);
        }
}
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
                                          u8 *peer_id, u16 capabilities)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n, *temp_node;
        struct tipc_link *l;
        int bearer_id;
        int i;

        spin_lock_bh(&tn->node_list_lock);
        n = tipc_node_find(net, addr);
        if (n) {
                if (n->capabilities == capabilities)
                        goto exit;
                /* Same node may come back with new capabilities */
                write_lock_bh(&n->lock);
                n->capabilities = capabilities;
                for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
                        l = n->links[bearer_id].link;
                        if (l)
                                tipc_link_update_caps(l, capabilities);
                }
                write_unlock_bh(&n->lock);
                goto exit;
        }
        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n) {
                pr_warn("Node creation failed, no memory\n");
                goto exit;
        }
        n->addr = addr;
        memcpy(&n->peer_id, peer_id, 16);
        n->net = net;
        n->capabilities = capabilities;
        kref_init(&n->kref);
        rwlock_init(&n->lock);
        INIT_HLIST_NODE(&n->hash);
        INIT_LIST_HEAD(&n->list);
        INIT_LIST_HEAD(&n->publ_list);
        INIT_LIST_HEAD(&n->conn_sks);
        skb_queue_head_init(&n->bc_entry.namedq);
        skb_queue_head_init(&n->bc_entry.inputq1);
        __skb_queue_head_init(&n->bc_entry.arrvq);
        skb_queue_head_init(&n->bc_entry.inputq2);
        for (i = 0; i < MAX_BEARERS; i++)
                spin_lock_init(&n->links[i].lock);
        n->state = SELF_DOWN_PEER_LEAVING;
        n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
        n->signature = INVALID_NODE_SIG;
        n->active_links[0] = INVALID_BEARER_ID;
        n->active_links[1] = INVALID_BEARER_ID;
        if (!tipc_link_bc_create(net, tipc_own_addr(net),
                                 addr, U16_MAX,
                                 tipc_link_window(tipc_bc_sndlink(net)),
                                 n->capabilities,
                                 &n->bc_entry.inputq1,
                                 &n->bc_entry.namedq,
                                 tipc_bc_sndlink(net),
                                 &n->bc_entry.link)) {
                pr_warn("Broadcast rcv link creation failed, no memory\n");
                kfree(n);
                n = NULL;
                goto exit;
        }
        tipc_node_get(n);
        timer_setup(&n->timer, tipc_node_timeout, 0);
        n->keepalive_intv = U32_MAX;
        hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
        list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                if (n->addr < temp_node->addr)
                        break;
        }
        list_add_tail_rcu(&n->list, &temp_node->list);
exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n;
}
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
        unsigned long tol = tipc_link_tolerance(l);
        unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

        /* Link with lowest tolerance determines timer interval */
        if (intv < n->keepalive_intv)
                n->keepalive_intv = intv;

        /* Ensure link's abort limit corresponds to current tolerance */
        tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
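/* Worked example: with a link tolerance of 1500 ms the interval candidate
 * is min(1500 / 4, 500) = 375 ms, and if that becomes the keepalive
 * interval the abort limit is 1500 / 375 = 4 probe rounds without a
 * response before the link is declared down.
 */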
static void tipc_node_delete_from_list(struct tipc_node *node)
{
        list_del_rcu(&node->list);
        hlist_del_rcu(&node->hash);
        tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
        tipc_node_delete_from_list(node);

        del_timer_sync(&node->timer);
        tipc_node_put(node);
}
void tipc_node_stop(struct net *net)
{
        struct tipc_net *tn = tipc_net(net);
        struct tipc_node *node, *t_node;

        spin_lock_bh(&tn->node_list_lock);
        list_for_each_entry_safe(node, t_node, &tn->node_list, list)
                tipc_node_delete(node);
        spin_unlock_bh(&tn->node_list_lock);
}
void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
        struct tipc_node *n;

        if (in_own_node(net, addr))
                return;

        n = tipc_node_find(net, addr);
        if (!n) {
                pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
                return;
        }
        tipc_node_write_lock(n);
        list_add_tail(subscr, &n->publ_list);
        tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
        struct tipc_node *n;

        if (in_own_node(net, addr))
                return;

        n = tipc_node_find(net, addr);
        if (!n) {
                pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
                return;
        }
        tipc_node_write_lock(n);
        list_del_init(subscr);
        tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
}
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
        struct tipc_node *node;
        struct tipc_sock_conn *conn;
        int err = 0;

        if (in_own_node(net, dnode))
                return 0;

        node = tipc_node_find(net, dnode);
        if (!node) {
                pr_warn("Connecting sock to node 0x%x failed\n", dnode);
                return -EHOSTUNREACH;
        }
        conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
        if (!conn) {
                err = -EHOSTUNREACH;
                goto exit;
        }
        conn->peer_node = dnode;
        conn->port = port;
        conn->peer_port = peer_port;

        tipc_node_write_lock(node);
        list_add_tail(&conn->list, &node->conn_sks);
        tipc_node_write_unlock(node);
exit:
        tipc_node_put(node);
        return err;
}
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
        struct tipc_node *node;
        struct tipc_sock_conn *conn, *safe;

        if (in_own_node(net, dnode))
                return;

        node = tipc_node_find(net, dnode);
        if (!node)
                return;

        tipc_node_write_lock(node);
        list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
                if (port != conn->port)
                        continue;
                list_del(&conn->list);
                kfree(conn);
        }
        tipc_node_write_unlock(node);
        tipc_node_put(node);
}
static void tipc_node_clear_links(struct tipc_node *node)
{
        int i;

        for (i = 0; i < MAX_BEARERS; i++) {
                struct tipc_link_entry *le = &node->links[i];

                if (le->link) {
                        kfree(le->link);
                        le->link = NULL;
                        node->link_cnt--;
                }
        }
}
/* tipc_node_cleanup - delete nodes that have had no active links for
 * NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
        struct tipc_net *tn = tipc_net(peer->net);
        bool deleted = false;

        /* If lock held by tipc_node_stop() the node will be deleted anyway */
        if (!spin_trylock_bh(&tn->node_list_lock))
                return false;

        tipc_node_write_lock(peer);

        if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
                tipc_node_clear_links(peer);
                tipc_node_delete_from_list(peer);
                deleted = true;
        }
        tipc_node_write_unlock(peer);
        spin_unlock_bh(&tn->node_list_lock);
        return deleted;
}
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
        struct tipc_node *n = from_timer(n, t, timer);
        struct tipc_link_entry *le;
        struct sk_buff_head xmitq;
        int remains = n->link_cnt;
        int bearer_id;
        int rc = 0;

        if (!node_is_up(n) && tipc_node_cleanup(n)) {
                /* Drop the timer's reference to the node */
                tipc_node_put(n);
                return;
        }

        __skb_queue_head_init(&xmitq);

        /* Initialize the node interval to a large value (10 seconds); it
         * will be recalculated below from the lowest link tolerance.
         */
        tipc_node_read_lock(n);
        n->keepalive_intv = 10000;
        tipc_node_read_unlock(n);
        for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
                tipc_node_read_lock(n);
                le = &n->links[bearer_id];
                if (le->link) {
                        spin_lock_bh(&le->lock);
                        /* Link tolerance may change asynchronously: */
                        tipc_node_calculate_timer(n, le->link);
                        rc = tipc_link_timeout(le->link, &xmitq);
                        spin_unlock_bh(&le->lock);
                        remains--;
                }
                tipc_node_read_unlock(n);
                tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
                if (rc & TIPC_LINK_DOWN_EVT)
                        tipc_node_link_down(n, bearer_id, false);
        }
        mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                                struct sk_buff_head *xmitq)
{
        int *slot0 = &n->active_links[0];
        int *slot1 = &n->active_links[1];
        struct tipc_link *ol = node_active_link(n, 0);
        struct tipc_link *nl = n->links[bearer_id].link;

        if (!nl || tipc_link_is_up(nl))
                return;

        tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
        if (!tipc_link_is_up(nl))
                return;

        n->working_links++;
        n->action_flags |= TIPC_NOTIFY_LINK_UP;
        n->link_id = tipc_link_id(nl);

        /* Leave room for tunnel header when returning 'mtu' to users: */
        n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

        tipc_bearer_add_dest(n->net, bearer_id, n->addr);
        tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

        pr_debug("Established link <%s> on network plane %c\n",
                 tipc_link_name(nl), tipc_link_plane(nl));

        /* Ensure that a STATE message goes first */
        tipc_link_build_state_msg(nl, xmitq);

        /* First link? => give it both slots */
        if (!ol) {
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
                n->failover_sent = false;
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
                tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
                return;
        }

        /* Second link => redistribute slots */
        if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
                pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_link_set_active(nl, true);
                tipc_link_set_active(ol, false);
        } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
                tipc_link_set_active(nl, true);
                *slot1 = bearer_id;
        } else {
                pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
        }

        /* Prepare synchronization with first link */
        tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
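/* A node thus uses at most two links at a time: one in each active slot,
 * or a single link occupying both slots. Lower-priority links remain in
 * standby and only take over through the failover path in
 * __tipc_node_link_down().
 */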
/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
                              struct sk_buff_head *xmitq)
{
        struct tipc_media_addr *maddr;

        tipc_node_write_lock(n);
        __tipc_node_link_up(n, bearer_id, xmitq);
        maddr = &n->links[bearer_id].maddr;
        tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
        tipc_node_write_unlock(n);
}
/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
                                  struct sk_buff_head *xmitq,
                                  struct tipc_media_addr **maddr)
{
        struct tipc_link_entry *le = &n->links[*bearer_id];
        int *slot0 = &n->active_links[0];
        int *slot1 = &n->active_links[1];
        int i, highest = 0, prio;
        struct tipc_link *l, *_l, *tnl;

        l = n->links[*bearer_id].link;
        if (!l || tipc_link_is_reset(l))
                return;

        n->working_links--;
        n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
        n->link_id = tipc_link_id(l);

        tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

        pr_debug("Lost link <%s> on network plane %c\n",
                 tipc_link_name(l), tipc_link_plane(l));

        /* Select new active link if any available */
        *slot0 = INVALID_BEARER_ID;
        *slot1 = INVALID_BEARER_ID;
        for (i = 0; i < MAX_BEARERS; i++) {
                _l = n->links[i].link;
                if (!_l || !tipc_link_is_up(_l))
                        continue;
                if (_l == l)
                        continue;
                prio = tipc_link_prio(_l);
                if (prio < highest)
                        continue;
                if (prio > highest) {
                        highest = prio;
                        *slot0 = i;
                        *slot1 = i;
                        continue;
                }
                *slot1 = i;
        }

        if (!node_is_up(n)) {
                if (tipc_link_peer_is_down(l))
                        tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
                tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
                tipc_link_fsm_evt(l, LINK_RESET_EVT);
                tipc_link_reset(l);
                tipc_link_build_reset_msg(l, xmitq);
                *maddr = &n->links[*bearer_id].maddr;
                node_lost_contact(n, &le->inputq);
                tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
                return;
        }
        tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

        /* There is still a working link => initiate failover */
        *bearer_id = n->active_links[0];
        tnl = n->links[*bearer_id].link;
        tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
        tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
        n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
        tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
        tipc_link_reset(l);
        tipc_link_fsm_evt(l, LINK_RESET_EVT);
        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
        tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
        *maddr = &n->links[*bearer_id].maddr;
}
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
        struct tipc_link_entry *le = &n->links[bearer_id];
        struct tipc_media_addr *maddr = NULL;
        struct tipc_link *l = le->link;
        int old_bearer_id = bearer_id;
        struct sk_buff_head xmitq;

        if (!l)
                return;

        __skb_queue_head_init(&xmitq);

        tipc_node_write_lock(n);
        if (!tipc_link_is_establishing(l)) {
                __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
                if (delete) {
                        kfree(l);
                        le->link = NULL;
                        n->link_cnt--;
                }
        } else {
                /* Defuse pending tipc_node_link_up() */
                tipc_link_fsm_evt(l, LINK_RESET_EVT);
        }
        tipc_node_write_unlock(n);
        if (delete)
                tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
        if (!skb_queue_empty(&xmitq))
                tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
}
static bool node_is_up(struct tipc_node *n)
{
        return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
        struct tipc_node *n;
        bool retval = false;

        if (in_own_node(net, addr))
                return true;

        n = tipc_node_find(net, addr);
        if (!n)
                return false;
        retval = node_is_up(n);
        tipc_node_put(n);
        return retval;
}
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
        struct tipc_node *n;

        addr ^= tipc_net(net)->random;
        while ((n = tipc_node_find(net, addr))) {
                tipc_node_put(n);
                addr++;
        }
        return addr;
}
/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
        struct tipc_net *tn = tipc_net(net);
        struct tipc_node *n;

        /* Suggest new address if some other peer is using this one */
        n = tipc_node_find(net, addr);
        if (n) {
                if (!memcmp(n->peer_id, id, NODE_ID_LEN))
                        addr = 0;
                tipc_node_put(n);
                if (!addr)
                        return 0;
                return tipc_node_suggest_addr(net, addr);
        }

        /* Suggest previously used address if peer is known */
        n = tipc_node_find_by_id(net, id);
        if (n) {
                addr = n->addr;
                tipc_node_put(n);
                return addr;
        }

        /* Even this node may be in conflict */
        if (tn->trial_addr == addr)
                return tipc_node_suggest_addr(net, addr);

        return 0;
}
void tipc_node_check_dest(struct net *net, u32 addr,
                          u8 *peer_id, struct tipc_bearer *b,
                          u16 capabilities, u32 signature,
                          struct tipc_media_addr *maddr,
                          bool *respond, bool *dupl_addr)
{
        struct tipc_node *n;
        struct tipc_link *l;
        struct tipc_link_entry *le;
        bool addr_match = false;
        bool sign_match = false;
        bool link_up = false;
        bool accept_addr = false;
        bool reset = true;
        char *if_name;
        unsigned long intv;
        u16 session;

        *dupl_addr = false;
        *respond = false;

        n = tipc_node_create(net, addr, peer_id, capabilities);
        if (!n)
                return;

        tipc_node_write_lock(n);

        le = &n->links[b->identity];

        /* Prepare to validate requesting node's signature and media address */
        l = le->link;
        link_up = l && tipc_link_is_up(l);
        addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
        sign_match = (signature == n->signature);

        /* These three flags give us eight permutations: */

        if (sign_match && addr_match && link_up) {
                /* All is fine. Do nothing. */
                reset = false;
        } else if (sign_match && addr_match && !link_up) {
                /* Respond. The link will come up in due time */
                *respond = true;
        } else if (sign_match && !addr_match && link_up) {
                /* Peer has changed i/f address without rebooting.
                 * If so, the link will reset soon, and the next
                 * discovery will be accepted. So we can ignore it.
                 * It may also be a cloned or malicious peer having
                 * chosen the same node address and signature as an
                 * existing one.
                 * Ignore requests until the link goes down, if ever.
                 */
                *dupl_addr = true;
        } else if (sign_match && !addr_match && !link_up) {
                /* Peer link has changed i/f address without rebooting.
                 * It may also be a cloned or malicious peer; we can't
                 * distinguish between the two.
                 * The signature is correct, so we must accept.
                 */
                accept_addr = true;
                *respond = true;
        } else if (!sign_match && addr_match && link_up) {
                /* Peer node rebooted. Two possibilities:
                 *  - Delayed re-discovery; this link endpoint has already
                 *    reset and re-established contact with the peer, before
                 *    receiving a discovery message from that node.
                 *    (The peer happened to receive one from this node first).
                 *  - The peer came back so fast that our side has not
                 *    discovered it yet. Probing from this side will soon
                 *    reset the link, since there can be no working link
                 *    endpoint at the peer end, and the link will re-establish.
                 *  Accept the signature, since it comes from a known peer.
                 */
                n->signature = signature;
        } else if (!sign_match && addr_match && !link_up) {
                /* The peer node has rebooted.
                 * Accept signature, since it is a known peer.
                 */
                n->signature = signature;
                *respond = true;
        } else if (!sign_match && !addr_match && link_up) {
                /* Peer rebooted with new address, or a new/duplicate peer.
                 * Ignore until the link goes down, if ever.
                 */
                *dupl_addr = true;
        } else if (!sign_match && !addr_match && !link_up) {
                /* Peer rebooted with new address, or it is a new peer.
                 * Accept signature and address.
                 */
                n->signature = signature;
                accept_addr = true;
                *respond = true;
        }

        if (!accept_addr)
                goto exit;

        /* Now create new link if not already existing */
        if (!l) {
                if (n->link_cnt == 2)
                        goto exit;

                if_name = strchr(b->name, ':') + 1;
                get_random_bytes(&session, sizeof(u16));
                if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
                                      b->net_plane, b->mtu, b->priority,
                                      b->window, session,
                                      tipc_own_addr(net), addr, peer_id,
                                      n->capabilities,
                                      tipc_bc_sndlink(n->net), n->bc_entry.link,
                                      &le->inputq,
                                      &n->bc_entry.namedq, &l)) {
                        *respond = false;
                        goto exit;
                }
                tipc_link_fsm_evt(l, LINK_RESET_EVT);
                if (n->state == NODE_FAILINGOVER)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
                le->link = l;
                n->link_cnt++;
                tipc_node_calculate_timer(n, l);
                if (n->link_cnt == 1) {
                        intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
                        if (!mod_timer(&n->timer, intv))
                                tipc_node_get(n);
                }
        }
        memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
        tipc_node_write_unlock(n);
        if (reset && l && !tipc_link_is_reset(l))
                tipc_node_link_down(n, b->identity, false);
        tipc_node_put(n);
}
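/* Summary of the eight (sign_match, addr_match, link_up) permutations
 * handled above:
 *
 *   sign  addr  up   action
 *   ----  ----  --   ------
 *     1     1    1   nothing to do
 *     1     1    0   respond
 *     1     0    1   ignore (dupl_addr)
 *     1     0    0   accept new address, respond
 *     0     1    1   accept new signature
 *     0     1    0   accept new signature, respond
 *     0     0    1   ignore (dupl_addr)
 *     0     0    0   accept signature and address, respond
 */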
void tipc_node_delete_links(struct net *net, int bearer_id)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n;

        rcu_read_lock();
        list_for_each_entry_rcu(n, &tn->node_list, list) {
                tipc_node_link_down(n, bearer_id, true);
        }
        rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
        int i;

        pr_warn("Resetting all links to %x\n", n->addr);

        for (i = 0; i < MAX_BEARERS; i++) {
                tipc_node_link_down(n, i, false);
        }
}
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
        int state = n->state;

        switch (state) {
        case SELF_DOWN_PEER_DOWN:
                switch (evt) {
                case SELF_ESTABL_CONTACT_EVT:
                        state = SELF_UP_PEER_COMING;
                        break;
                case PEER_ESTABL_CONTACT_EVT:
                        state = SELF_COMING_PEER_UP;
                        break;
                case SELF_LOST_CONTACT_EVT:
                case PEER_LOST_CONTACT_EVT:
                        break;
                case NODE_SYNCH_END_EVT:
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case SELF_UP_PEER_UP:
                switch (evt) {
                case SELF_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_LEAVING;
                        break;
                case PEER_LOST_CONTACT_EVT:
                        state = SELF_LEAVING_PEER_DOWN;
                        break;
                case NODE_SYNCH_BEGIN_EVT:
                        state = NODE_SYNCHING;
                        break;
                case NODE_FAILOVER_BEGIN_EVT:
                        state = NODE_FAILINGOVER;
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                case NODE_SYNCH_END_EVT:
                case NODE_FAILOVER_END_EVT:
                        break;
                default:
                        goto illegal_evt;
                }
                break;
        case SELF_DOWN_PEER_LEAVING:
                switch (evt) {
                case PEER_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_DOWN;
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                case SELF_LOST_CONTACT_EVT:
                        break;
                case NODE_SYNCH_END_EVT:
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case SELF_UP_PEER_COMING:
                switch (evt) {
                case PEER_ESTABL_CONTACT_EVT:
                        state = SELF_UP_PEER_UP;
                        break;
                case SELF_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_DOWN;
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_LOST_CONTACT_EVT:
                case NODE_SYNCH_END_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
                        break;
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case SELF_COMING_PEER_UP:
                switch (evt) {
                case SELF_ESTABL_CONTACT_EVT:
                        state = SELF_UP_PEER_UP;
                        break;
                case PEER_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_DOWN;
                        break;
                case SELF_LOST_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                        break;
                case NODE_SYNCH_END_EVT:
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case SELF_LEAVING_PEER_DOWN:
                switch (evt) {
                case SELF_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_DOWN;
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                case PEER_LOST_CONTACT_EVT:
                        break;
                case NODE_SYNCH_END_EVT:
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case NODE_FAILINGOVER:
                switch (evt) {
                case SELF_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_LEAVING;
                        break;
                case PEER_LOST_CONTACT_EVT:
                        state = SELF_LEAVING_PEER_DOWN;
                        break;
                case NODE_FAILOVER_END_EVT:
                        state = SELF_UP_PEER_UP;
                        break;
                case NODE_FAILOVER_BEGIN_EVT:
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                        break;
                case NODE_SYNCH_BEGIN_EVT:
                case NODE_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case NODE_SYNCHING:
                switch (evt) {
                case SELF_LOST_CONTACT_EVT:
                        state = SELF_DOWN_PEER_LEAVING;
                        break;
                case PEER_LOST_CONTACT_EVT:
                        state = SELF_LEAVING_PEER_DOWN;
                        break;
                case NODE_SYNCH_END_EVT:
                        state = SELF_UP_PEER_UP;
                        break;
                case NODE_FAILOVER_BEGIN_EVT:
                        state = NODE_FAILINGOVER;
                        break;
                case NODE_SYNCH_BEGIN_EVT:
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_ESTABL_CONTACT_EVT:
                        break;
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown node fsm state %x\n", state);
                break;
        }
        n->state = state;
        return;

illegal_evt:
        pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}
static void node_lost_contact(struct tipc_node *n,
                              struct sk_buff_head *inputq)
{
        struct tipc_sock_conn *conn, *safe;
        struct tipc_link *l;
        struct list_head *conns = &n->conn_sks;
        struct sk_buff *skb;
        uint i;

        pr_debug("Lost contact with %x\n", n->addr);
        n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);

        /* Clean up broadcast state */
        tipc_bcast_remove_peer(n->net, n->bc_entry.link);

        /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
                l = n->links[i].link;
                if (l)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
        }

        /* Notify publications from this node */
        n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

        /* Notify sockets connected to node */
        list_for_each_entry_safe(conn, safe, conns, list) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
                                      conn->peer_node, conn->port,
                                      conn->peer_port, TIPC_ERR_NO_NODE);
                if (likely(skb))
                        skb_queue_tail(inputq, skb);
                list_del(&conn->list);
                kfree(conn);
        }
}
/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                           char *linkname, size_t len)
{
        struct tipc_link *link;
        int err = -EINVAL;
        struct tipc_node *node = tipc_node_find(net, addr);

        if (!node)
                return err;

        if (bearer_id >= MAX_BEARERS)
                goto exit;

        tipc_node_read_lock(node);
        link = node->links[bearer_id].link;
        if (link) {
                strncpy(linkname, tipc_link_name(link), len);
                err = 0;
        }
        tipc_node_read_unlock(node);
exit:
        tipc_node_put(node);
        return err;
}
/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
        void *hdr;
        struct nlattr *attrs;

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          NLM_F_MULTI, TIPC_NL_NODE_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
        if (!attrs)
                goto msg_full;

        if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
                goto attr_msg_full;
        if (node_is_up(node))
                if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
                        goto attr_msg_full;

        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}
/**
 * tipc_node_xmit() - the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
                   u32 dnode, int selector)
{
        struct tipc_link_entry *le = NULL;
        struct tipc_node *n;
        struct sk_buff_head xmitq;
        int bearer_id;
        int rc;

        if (in_own_node(net, dnode)) {
                spin_lock_init(&list->lock);
                tipc_sk_rcv(net, list);
                return 0;
        }

        n = tipc_node_find(net, dnode);
        if (unlikely(!n)) {
                __skb_queue_purge(list);
                return -EHOSTUNREACH;
        }

        tipc_node_read_lock(n);
        bearer_id = n->active_links[selector & 1];
        if (unlikely(bearer_id == INVALID_BEARER_ID)) {
                tipc_node_read_unlock(n);
                tipc_node_put(n);
                __skb_queue_purge(list);
                return -EHOSTUNREACH;
        }

        __skb_queue_head_init(&xmitq);
        le = &n->links[bearer_id];
        spin_lock_bh(&le->lock);
        rc = tipc_link_xmit(le->link, list, &xmitq);
        spin_unlock_bh(&le->lock);
        tipc_node_read_unlock(n);

        if (unlikely(rc == -ENOBUFS))
                tipc_node_link_down(n, bearer_id, false);
        else
                tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

        tipc_node_put(n);

        return rc;
}
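/* The 'selector' is typically a per-flow constant such as the originating
 * port number; only its lowest bit is used, so all packets of one flow map
 * consistently to the same active link slot and stay in sequence.
 */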
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
                       u32 selector)
{
        struct sk_buff_head head;

        __skb_queue_head_init(&head);
        __skb_queue_tail(&head, skb);
        tipc_node_xmit(net, &head, dnode, selector);
        return 0;
}
/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
        struct sk_buff *skb;
        u32 selector, dnode;

        while ((skb = __skb_dequeue(xmitq))) {
                selector = msg_origport(buf_msg(skb));
                dnode = msg_destnode(buf_msg(skb));
                tipc_node_xmit_skb(net, skb, dnode, selector);
        }
        return 0;
}
void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
        struct sk_buff *txskb;
        struct tipc_node *n;
        u32 dst;

        rcu_read_lock();
        list_for_each_entry_rcu(n, tipc_nodes(net), list) {
                dst = n->addr;
                if (in_own_node(net, dst))
                        continue;
                if (!node_is_up(n))
                        continue;
                txskb = pskb_copy(skb, GFP_ATOMIC);
                if (!txskb)
                        break;
                msg_set_destnode(buf_msg(txskb), dst);
                tipc_node_xmit_skb(net, txskb, dst, 0);
        }
        rcu_read_unlock();

        kfree_skb(skb);
}
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
        struct tipc_bclink_entry *be = &n->bc_entry;

        /* 'arrvq' is under inputq2's lock protection */
        spin_lock_bh(&be->inputq2.lock);
        spin_lock_bh(&be->inputq1.lock);
        skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
        spin_unlock_bh(&be->inputq1.lock);
        spin_unlock_bh(&be->inputq2.lock);
        tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
                                  int bearer_id, struct sk_buff_head *xmitq)
{
        struct tipc_link *ucl;
        int rc;

        rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

        if (rc & TIPC_LINK_DOWN_EVT) {
                tipc_node_reset_links(n);
                return;
        }

        if (!(rc & TIPC_LINK_SND_STATE))
                return;

        /* If probe message, a STATE response will be sent anyway */
        if (msg_probe(hdr))
                return;

        /* Produce a STATE message carrying broadcast NACK */
        tipc_node_read_lock(n);
        ucl = n->links[bearer_id].link;
        if (ucl)
                tipc_link_build_state_msg(ucl, xmitq);
        tipc_node_read_unlock(n);
}
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
        int rc;
        struct sk_buff_head xmitq;
        struct tipc_bclink_entry *be;
        struct tipc_link_entry *le;
        struct tipc_msg *hdr = buf_msg(skb);
        int usr = msg_user(hdr);
        u32 dnode = msg_destnode(hdr);
        struct tipc_node *n;

        __skb_queue_head_init(&xmitq);

        /* If NACK for other node, let rcv link for that node peek into it */
        if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
                n = tipc_node_find(net, dnode);
        else
                n = tipc_node_find(net, msg_prevnode(hdr));
        if (!n) {
                kfree_skb(skb);
                return;
        }
        be = &n->bc_entry;
        le = &n->links[bearer_id];

        rc = tipc_bcast_rcv(net, be->link, skb);

        /* Broadcast ACKs are sent on a unicast link */
        if (rc & TIPC_LINK_SND_STATE) {
                tipc_node_read_lock(n);
                tipc_link_build_state_msg(le->link, &xmitq);
                tipc_node_read_unlock(n);
        }

        if (!skb_queue_empty(&xmitq))
                tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

        if (!skb_queue_empty(&be->inputq1))
                tipc_node_mcast_rcv(n);

        /* If reassembly or retransmission failure => reset all links to peer */
        if (rc & TIPC_LINK_DOWN_EVT)
                tipc_node_reset_links(n);

        tipc_node_put(n);
}
/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                                  int bearer_id, struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        int usr = msg_user(hdr);
        int mtyp = msg_type(hdr);
        u16 oseqno = msg_seqno(hdr);
        u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
        u16 exp_pkts = msg_msgcnt(hdr);
        u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
        int state = n->state;
        struct tipc_link *l, *tnl, *pl = NULL;
        struct tipc_media_addr *maddr;
        int pb_id;

        l = n->links[bearer_id].link;
        if (!l)
                return false;
        rcv_nxt = tipc_link_rcv_nxt(l);

        if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
                return true;

        /* Find parallel link, if any */
        for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
                if ((pb_id != bearer_id) && n->links[pb_id].link) {
                        pl = n->links[pb_id].link;
                        break;
                }
        }

        if (!tipc_link_validate_msg(l, hdr))
                return false;

        /* Check and update node accessibility if applicable */
        if (state == SELF_UP_PEER_COMING) {
                if (!tipc_link_is_up(l))
                        return true;
                if (!msg_peer_link_is_up(hdr))
                        return true;
                tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
        }

        if (state == SELF_DOWN_PEER_LEAVING) {
                if (msg_peer_node_is_up(hdr))
                        return false;
                tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
                return true;
        }

        if (state == SELF_LEAVING_PEER_DOWN)
                return false;

        /* Ignore duplicate packets */
        if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
                return true;

        /* Initiate or update failover mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
                syncpt = oseqno + exp_pkts - 1;
                if (pl && tipc_link_is_up(pl)) {
                        __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
                        tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
                                                        tipc_link_inputq(l));
                }
                /* If parallel link was already down, and this happened before
                 * the tunnel link came up, FAILOVER was never sent. Ensure that
                 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
                 */
                if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
                        tipc_link_create_dummy_tnl_msg(l, xmitq);
                        n->failover_sent = true;
                }
                /* If pkts arrive out of order, use lowest calculated syncpt */
                if (less(syncpt, n->sync_point))
                        n->sync_point = syncpt;
        }

        /* Open parallel link when tunnel link reaches synch point */
        if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
                if (!more(rcv_nxt, n->sync_point))
                        return true;
                tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
                if (pl)
                        tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
                return true;
        }

        /* No synching needed if only one link */
        if (!pl || !tipc_link_is_up(pl))
                return true;

        /* Initiate synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
                if (!tipc_link_is_up(l))
                        __tipc_node_link_up(n, bearer_id, xmitq);
                if (n->state == SELF_UP_PEER_UP) {
                        n->sync_point = syncpt;
                        tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
                        tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
                }
        }

        /* Open tunnel link when parallel link reaches synch point */
        if (n->state == NODE_SYNCHING) {
                if (tipc_link_is_synching(l)) {
                        tnl = l;
                } else {
                        tnl = pl;
                        pl = l;
                }
                inputq_len = skb_queue_len(tipc_link_inputq(pl));
                dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
                if (more(dlv_nxt, n->sync_point)) {
                        tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
                        tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
                        return true;
                }
                if (l == pl)
                        return true;
                if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
                        return true;
                if (usr == LINK_PROTOCOL)
                        return true;
                return false;
        }
        return true;
}
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
        struct sk_buff_head xmitq;
        struct tipc_node *n;
        struct tipc_msg *hdr;
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
        u32 self = tipc_own_addr(net);
        int usr, rc = 0;
        u16 bc_ack;

        __skb_queue_head_init(&xmitq);

        /* Ensure message is well-formed before touching the header */
        if (unlikely(!tipc_msg_validate(&skb)))
                goto discard;
        hdr = buf_msg(skb);
        usr = msg_user(hdr);
        bc_ack = msg_bcast_ack(hdr);

        /* Handle arrival of discovery or broadcast packet */
        if (unlikely(msg_non_seq(hdr))) {
                if (unlikely(usr == LINK_CONFIG))
                        return tipc_disc_rcv(net, skb, b);
                else
                        return tipc_node_bc_rcv(net, skb, bearer_id);
        }

        /* Discard unicast link messages destined for another node */
        if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
                goto discard;

        /* Locate neighboring node that sent packet */
        n = tipc_node_find(net, msg_prevnode(hdr));
        if (unlikely(!n))
                goto discard;
        le = &n->links[bearer_id];

        /* Ensure broadcast reception is in synch with peer's send state */
        if (unlikely(usr == LINK_PROTOCOL))
                tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
        else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
                tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

        /* Receive packet directly if conditions permit */
        tipc_node_read_lock(n);
        if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
                spin_lock_bh(&le->lock);
                if (le->link) {
                        rc = tipc_link_rcv(le->link, skb, &xmitq);
                        skb = NULL;
                }
                spin_unlock_bh(&le->lock);
        }
        tipc_node_read_unlock(n);

        /* Check/update node state before receiving */
        if (unlikely(skb)) {
                if (unlikely(skb_linearize(skb)))
                        goto discard;
                tipc_node_write_lock(n);
                if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
                        if (le->link) {
                                rc = tipc_link_rcv(le->link, skb, &xmitq);
                                skb = NULL;
                        }
                }
                tipc_node_write_unlock(n);
        }

        if (unlikely(rc & TIPC_LINK_UP_EVT))
                tipc_node_link_up(n, bearer_id, &xmitq);

        if (unlikely(rc & TIPC_LINK_DOWN_EVT))
                tipc_node_link_down(n, bearer_id, false);

        if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
                tipc_named_rcv(net, &n->bc_entry.namedq);

        if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
                tipc_node_mcast_rcv(n);

        if (!skb_queue_empty(&le->inputq))
                tipc_sk_rcv(net, &le->inputq);

        if (!skb_queue_empty(&xmitq))
                tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

        tipc_node_put(n);
discard:
        kfree_skb(skb);
}
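/* tipc_rcv() thus has a fast path and a slow path: in SELF_UP_PEER_UP with
 * ordinary traffic the packet goes straight to tipc_link_rcv() under the
 * per-link spinlock and the node read lock; anything else (tunnel packets,
 * state changes) falls through to tipc_node_check_state() under the full
 * node write lock.
 */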
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
                              int prop)
{
        struct tipc_net *tn = tipc_net(net);
        int bearer_id = b->identity;
        struct sk_buff_head xmitq;
        struct tipc_link_entry *e;
        struct tipc_node *n;

        __skb_queue_head_init(&xmitq);

        rcu_read_lock();

        list_for_each_entry_rcu(n, &tn->node_list, list) {
                tipc_node_write_lock(n);
                e = &n->links[bearer_id];
                if (e->link) {
                        if (prop == TIPC_NLA_PROP_TOL)
                                tipc_link_set_tolerance(e->link, b->tolerance,
                                                        &xmitq);
                        else if (prop == TIPC_NLA_PROP_MTU)
                                tipc_link_set_mtu(e->link, b->mtu);
                }
                tipc_node_write_unlock(n);
                tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
        }

        rcu_read_unlock();
}
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
        struct tipc_node *peer;
        u32 addr;
        int err;

        /* We identify the peer by its net */
        if (!info->attrs[TIPC_NLA_NET])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
                               info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
                               info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_NET_ADDR])
                return -EINVAL;

        addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

        if (in_own_node(net, addr))
                return -ENOTSUPP;

        spin_lock_bh(&tn->node_list_lock);
        peer = tipc_node_find(net, addr);
        if (!peer) {
                spin_unlock_bh(&tn->node_list_lock);
                return -ENXIO;
        }

        tipc_node_write_lock(peer);
        if (peer->state != SELF_DOWN_PEER_DOWN &&
            peer->state != SELF_DOWN_PEER_LEAVING) {
                tipc_node_write_unlock(peer);
                err = -EBUSY;
                goto err_out;
        }

        tipc_node_clear_links(peer);
        tipc_node_write_unlock(peer);
        tipc_node_delete(peer);

        err = 0;
err_out:
        tipc_node_put(peer);
        spin_unlock_bh(&tn->node_list_lock);

        return err;
}
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int err;
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        int done = cb->args[0];
        int last_addr = cb->args[1];
        struct tipc_node *node;
        struct tipc_nl_msg msg;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();
        if (last_addr) {
                node = tipc_node_find(net, last_addr);
                if (!node) {
                        rcu_read_unlock();
                        /* We never set seq or call nl_dump_check_consistent();
                         * this means that setting prev_seq here will cause the
                         * consistency check to fail in the netlink callback
                         * handler. Resulting in the NLMSG_DONE message having
                         * the NLM_F_DUMP_INTR flag set if the node state
                         * changed while we released the lock.
                         */
                        cb->prev_seq = 1;
                        return -EPIPE;
                }
                tipc_node_put(node);
        }

        list_for_each_entry_rcu(node, &tn->node_list, list) {
                if (last_addr) {
                        if (node->addr == last_addr)
                                last_addr = 0;
                        else
                                continue;
                }

                tipc_node_read_lock(node);
                err = __tipc_nl_add_node(&msg, node);
                if (err) {
                        last_addr = node->addr;
                        tipc_node_read_unlock(node);
                        goto out;
                }

                tipc_node_read_unlock(node);
        }
        done = 1;
out:
        cb->args[0] = done;
        cb->args[1] = last_addr;
        rcu_read_unlock();

        return skb->len;
}
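/* Netlink dump resume convention: cb->args[0] records whether the dump is
 * complete and cb->args[1] the address of the last node added, so a later
 * invocation can continue from where the previous skb filled up.
 */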
/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
                                                const char *link_name,
                                                unsigned int *bearer_id)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l;
        struct tipc_node *n;
        struct tipc_node *found_node = NULL;
        int i;

        *bearer_id = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(n, &tn->node_list, list) {
                tipc_node_read_lock(n);
                for (i = 0; i < MAX_BEARERS; i++) {
                        l = n->links[i].link;
                        if (l && !strcmp(tipc_link_name(l), link_name)) {
                                *bearer_id = i;
                                found_node = n;
                                break;
                        }
                }
                tipc_node_read_unlock(n);
                if (found_node)
                        break;
        }
        rcu_read_unlock();

        return found_node;
}
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
        int err;
        int res = 0;
        int bearer_id;
        char *name;
        struct tipc_link *link;
        struct tipc_node *node;
        struct sk_buff_head xmitq;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);

        __skb_queue_head_init(&xmitq);

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        if (strcmp(name, tipc_bclink_name) == 0)
                return tipc_nl_bc_link_set(net, attrs);

        node = tipc_node_find_by_name(net, name, &bearer_id);
        if (!node)
                return -EINVAL;

        tipc_node_read_lock(node);

        link = node->links[bearer_id].link;
        if (!link) {
                res = -EINVAL;
                goto out;
        }

        if (attrs[TIPC_NLA_LINK_PROP]) {
                struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

                err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
                                              props);
                if (err) {
                        res = err;
                        goto out;
                }

                if (props[TIPC_NLA_PROP_TOL]) {
                        u32 tol;

                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
                        tipc_link_set_tolerance(link, tol, &xmitq);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;

                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
                        tipc_link_set_prio(link, prio, &xmitq);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;

                        win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
                        tipc_link_set_queue_limits(link, win);
                }
        }

out:
        tipc_node_read_unlock(node);
        tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
        return res;
}
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct tipc_nl_msg msg;
        char *name;
        int err;

        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
                return -ENOMEM;

        if (strcmp(name, tipc_bclink_name) == 0) {
                err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto err_free;
        } else {
                int bearer_id;
                struct tipc_node *node;
                struct tipc_link *link;

                node = tipc_node_find_by_name(net, name, &bearer_id);
                if (!node) {
                        err = -EINVAL;
                        goto err_free;
                }

                tipc_node_read_lock(node);
                link = node->links[bearer_id].link;
                if (!link) {
                        tipc_node_read_unlock(node);
                        err = -EINVAL;
                        goto err_free;
                }

                err = __tipc_nl_add_link(net, &msg, link, 0);
                tipc_node_read_unlock(node);
                if (err)
                        goto err_free;
        }

        return genlmsg_reply(msg.skb, info);

err_free:
        nlmsg_free(msg.skb);
        return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
        int err;
        char *link_name;
        unsigned int bearer_id;
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);
        struct tipc_link_entry *le;

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        if (strcmp(link_name, tipc_bclink_name) == 0) {
                err = tipc_bclink_reset_stats(net);
                if (err)
                        return err;
                return 0;
        }

        node = tipc_node_find_by_name(net, link_name, &bearer_id);
        if (!node)
                return -EINVAL;

        le = &node->links[bearer_id];
        tipc_node_read_lock(node);
        spin_lock_bh(&le->lock);
        link = node->links[bearer_id].link;
        if (!link) {
                spin_unlock_bh(&le->lock);
                tipc_node_read_unlock(node);
                return -EINVAL;
        }
        tipc_link_reset_stats(link);
        spin_unlock_bh(&le->lock);
        tipc_node_read_unlock(node);
        return 0;
}
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
                                    struct tipc_node *node, u32 *prev_link)
{
        u32 i;
        int err;

        for (i = *prev_link; i < MAX_BEARERS; i++) {
                *prev_link = i;

                if (!node->links[i].link)
                        continue;

                err = __tipc_nl_add_link(net, msg,
                                         node->links[i].link, NLM_F_MULTI);
                if (err)
                        return err;
        }
        *prev_link = 0;

        return 0;
}
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_nl_msg msg;
        u32 prev_node = cb->args[0];
        u32 prev_link = cb->args[1];
        int done = cb->args[2];
        int err;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();
        if (prev_node) {
                node = tipc_node_find(net, prev_node);
                if (!node) {
                        /* We never set seq or call nl_dump_check_consistent();
                         * this means that setting prev_seq here will cause the
                         * consistency check to fail in the netlink callback
                         * handler. Resulting in the last NLMSG_DONE message
                         * having the NLM_F_DUMP_INTR flag set.
                         */
                        cb->prev_seq = 1;
                        goto out;
                }
                tipc_node_put(node);

                list_for_each_entry_continue_rcu(node, &tn->node_list,
                                                 list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        } else {
                err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto out;

                list_for_each_entry_rcu(node, &tn->node_list, list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        }
        done = 1;
out:
        rcu_read_unlock();

        cb->args[0] = prev_node;
        cb->args[1] = prev_link;
        cb->args[2] = done;

        return skb->len;
}
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
        struct net *net = sock_net(skb->sk);
        int err;

        if (!info->attrs[TIPC_NLA_MON])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
                               info->attrs[TIPC_NLA_MON],
                               tipc_nl_monitor_policy, info->extack);
        if (err)
                return err;

        if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
                u32 val;

                val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
                err = tipc_nl_monitor_set_threshold(net, val);
                if (err)
                        return err;
        }

        return 0;
}
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
        struct nlattr *attrs;
        void *hdr;
        u32 val;

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          0, TIPC_NL_MON_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
        if (!attrs)
                goto msg_full;

        val = tipc_nl_monitor_get_threshold(net);

        if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
                goto attr_msg_full;

        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;
        int err;

        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
                return -ENOMEM;
        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;

        err = __tipc_nl_add_monitor_prop(net, &msg);
        if (err) {
                nlmsg_free(msg.skb);
                return err;
        }

        return genlmsg_reply(msg.skb, info);
}
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_bearer = cb->args[0];
        struct tipc_nl_msg msg;
        int bearer_id;
        int err;

        if (prev_bearer == MAX_BEARERS)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
                err = __tipc_nl_add_monitor(net, &msg, bearer_id);
                if (err)
                        break;
        }
        rtnl_unlock();
        cb->args[0] = bearer_id;

        return skb->len;
}
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
                                   struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_node = cb->args[1];
        u32 bearer_id = cb->args[2];
        int done = cb->args[0];
        struct tipc_nl_msg msg;
        int err;

        if (!prev_node) {
                struct nlattr **attrs;
                struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

                err = tipc_nlmsg_parse(cb->nlh, &attrs);
                if (err)
                        return err;

                if (!attrs[TIPC_NLA_MON])
                        return -EINVAL;

                err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
                                       attrs[TIPC_NLA_MON],
                                       tipc_nl_monitor_policy, NULL);
                if (err)
                        return err;

                if (!mon[TIPC_NLA_MON_REF])
                        return -EINVAL;

                bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

                if (bearer_id >= MAX_BEARERS)
                        return -EINVAL;
        }

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
        if (!err)
                done = 1;

        rtnl_unlock();
        cb->args[0] = done;
        cb->args[1] = prev_node;
        cb->args[2] = bearer_id;

        return skb->len;
}