/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000	/* ms: delete a down node after 5 min */
/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up, publish link state name
 * TIPC_NOTIFY_LINK_DOWN: notify link is down, withdraw link state name
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock;		/* per link */
	u32 mtu;			/* usable MTU, tunnel header deducted */
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;	/* raw broadcast link input */
	struct sk_buff_head arrvq;	/* staging queue for multicast */
	struct sk_buff_head inputq2;	/* socket delivery queue */
	struct sk_buff_head namedq;	/* name table messages */
};
/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace, e.g. a local container
 * @peer_hash_mix: hash of the peer's net namespace
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
};
/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
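/* Illustrative sketch (added commentary, not from the original file):
 * a typical first-contact sequence drives the node FSM like this:
 *
 *	tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
 *		// SELF_DOWN_PEER_DOWN -> SELF_UP_PEER_COMING
 *	tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
 *		// SELF_UP_PEER_COMING -> SELF_UP_PEER_UP
 *
 * See tipc_node_fsm_evt() below for the full transition table.
 */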
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}
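/* Added note (commentary): 'sel' is a caller-supplied selector, e.g. a
 * port number; 'sel & 1' maps it onto one of the two active-link slots,
 * so traffic from a given source consistently sticks to one link while
 * still spreading load across two equal-priority links.
 */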
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented message
	 * if they are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}
u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}
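/* Added usage sketch (commentary, not from the original file): every
 * successful tipc_node_find() takes a reference which the caller must
 * drop again:
 *
 *	n = tipc_node_find(net, addr);
 *	if (!n)
 *		return -EHOSTUNREACH;
 *	// ... use n under the appropriate node lock ...
 *	tipc_node_put(n);
 */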
/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}
static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

/* Unlock without running the deferred action_flags handlers; only safe
 * when the caller has not set any new action flags.
 */
static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}
static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}
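/* Added commentary (sketch): callers never invoke the notification
 * functions above directly; they set an action flag while holding the
 * write lock and let the unlock path run the handlers lock-free:
 *
 *	tipc_node_write_lock(n);
 *	n->action_flags |= TIPC_NOTIFY_NODE_UP;
 *	tipc_node_write_unlock(n);	// triggers tipc_named_node_up()
 */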
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Check whether the peer actually lives in this namespace */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities,
					  u32 signature, u32 hash_mixes)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign the kernel-local peer namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
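/* Worked example (added commentary): with the default link tolerance of
 * 1500 ms, intv = min(1500 / 4, 500) = 375 ms. If that becomes the
 * node's keepalive interval, the link abort limit is set to
 * 1500 / 375 = 4, i.e. the link is aborted after four silent intervals.
 */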
static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}
void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}
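/* Added note: the connection entries registered above are consumed by
 * node_lost_contact() below, which turns each of them into a
 * TIPC_ERR_NO_NODE abort message delivered to the owning socket.
 */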
static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}
/* tipc_node_cleanup - delete nodes that have not had any active links
 * for NODE_CLEANUP_AFTER ms
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}

	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

	__skb_queue_head_init(&xmitq);

	/* Start from a deliberately large interval (10 seconds); it is then
	 * recalculated down to the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
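/* Added note: the node timer is self-rearming. It only stops when
 * tipc_node_cleanup() succeeds above (dropping the timer's node
 * reference), or when tipc_node_delete() calls del_timer_sync().
 */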
/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
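/* Added commentary (sketch): the two active_links[] slots implement
 * load sharing. With two equal-priority links on bearers 0 and 1,
 * slot0 = 0 and slot1 = 1, so unicast traffic is spread by
 * 'selector & 1'; a strictly higher-priority link takes both slots
 * and the other link becomes standby.
 */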
/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}
/**
 * tipc_node_link_failover() - start failover in case of "half-failover"
 *
 * This function is only called in the very special situation where link
 * failover has already started on the peer node but not on this node.
 * This can happen when e.g.:
 *	1. Both links <1A-2A>, <1B-2B> are down
 *	2. Link endpoint 2A comes up, but 1A is still down (e.g. due to
 *	   network disturbance, wrong session, etc.)
 *	3. Link <1B-2B> comes up
 *	4. Link endpoint 2A goes down again (e.g. due to link tolerance
 *	   timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover on its own!
 *
 * @n: tipc node structure
 * @l: the failing-over link endpoint (may be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be transmitted on the tunnel link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid ending up in a "self-failover" that can never complete */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush; the failed link may still be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}
/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
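/* Added commentary: the synch point is placed half a sequence-number
 * space ahead of the tunnel link's rcv_nxt, e.g. rcv_nxt = 100 gives
 * sync_point = 100 + (U16_MAX / 2 - 1) = 32866 (mod 2^16), which safely
 * covers everything that can still be in flight on a 16-bit window.
 */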
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}
static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
		return addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
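/* Added example (illustrative): if a peer requests address 0x1001 while
 * another node already owns it, tipc_node_suggest_addr() XORs in the
 * per-net random value and then linearly probes 0x1001 ^ random,
 * +1, +2, ... until an address with no existing node object is found.
 */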
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool link_is_reset = false;
	bool accept_addr = false;
	bool reset = false;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, signature,
			     hash_mixes);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	link_is_reset = l && tipc_link_is_reset(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Ignore requests. */
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
		reset = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
		reset = true;
	}
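	/* Added summary (commentary) of the eight permutations above:
	 *
	 *	sign   addr   link    action
	 *	match  match   up     ignore (all is fine)
	 *	match  match  down    respond
	 *	match  diff    up     ignore, flag duplicate address
	 *	match  diff   down    accept address, respond, reset
	 *	diff   match   up     accept signature (peer rebooted)
	 *	diff   match  down    accept signature, respond
	 *	diff   diff    up     ignore, flag duplicate address
	 *	diff   diff   down    accept signature and address, respond,
	 *			      reset
	 */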
	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		link_is_reset = tipc_link_is_reset(l);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && !link_is_reset)
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}
/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}
/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
	default:
		return;
	}
}
/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	struct net *peer_net;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	peer_net = n->peer_net;
	tipc_node_read_unlock(n);
	if (node_up && peer_net && check_net(peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_put(n);
			return 0;
		}
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}
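/* Added usage sketch (illustrative, not from the original file):
 * sending one pre-built system-level message to a peer node:
 *
 *	skb = tipc_msg_create(...);
 *	if (skb)
 *		tipc_node_xmit_skb(net, skb, dnode, 0);
 */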
/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}
void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
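/* Added commentary: multicast reception is staged through three queues:
 * inputq1 (raw broadcast link input) -> arrvq (ordered staging) ->
 * inputq2 (per-socket delivery in tipc_sk_mcast_rcv()). arrvq borrows
 * inputq2's lock for protection, which is why both locks are taken above.
 */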
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
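/* Added numeric example: assume sync_point = 200. While NODE_SYNCHING,
 * packets on the tunnel link are held back until the parallel link has
 * delivered everything up to that point, i.e. until
 * dlv_nxt = rcv_nxt - inputq_len exceeds 200, at which point
 * NODE_SYNCH_END_EVT is raised above and both links run independently.
 */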
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr;
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed before touching the header */
	TIPC_SKB_CB(skb)->validated = false;
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto discard;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	kfree_skb(skb);
}
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);
		}
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
	}

	rcu_read_unlock();
}
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is
 * found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	unsigned int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}
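/* Added note (assumption about userspace tooling): this handler backs
 * link property changes requested over generic netlink, e.g. roughly
 * what the iproute2 'tipc' utility issues for
 * 'tipc link set tolerance|priority|window ...'.
 */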
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		unsigned int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;
	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}
/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *	  - false: dump only tipc node data
 *	  - true: dump node link data as well
 * @buf: returned buffer of dump data in format
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}