1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2021 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
15 * This is the appallingly named "mid-level" comms layer. It provides an
16 * application-layer "reliable" delivery service on top of the underlying
17 * lowcomms transport layer.
21 * Each node keeps track of all sent DLM messages in send_queue with a sequence
22 * number. The receiver will send a DLM_ACK message back for every DLM message
23 * received at the other side. If a reconnect happens in lowcomms we will send
24 * all unacknowledged dlm messages again. The receiving side might drop any already
25 * received message by comparing sequence numbers.
27 * How version detection works:
29 * Due to the fact that dlm has pre-configured node addresses on every side
30 * it is in its nature that every side connects at start to transmit
31 * dlm messages which ends in a race. However DLM_RCOM_NAMES, DLM_RCOM_STATUS
32 * and their replies are the first messages which are exchanged. Due to backwards
33 * compatibility these messages are not covered by the midcomms re-transmission
34 * layer. These messages have their own re-transmission handling in the dlm
35 * application layer. The version field of every node will be set on these RCOM
36 * messages as soon as they arrived and the node isn't yet part of the nodes
37 * hash. There also exists logic to detect a version mismatch if something weird
38 * is going on or the first message isn't an expected one.
42 * The midcomms layer does a 4 way handshake for termination on DLM protocol
43 * like TCP supports it with half-closed socket support. SCTP doesn't support
44 * half-closed socket, so we do it on DLM layer. Also socket shutdown() can be
45 * interrupted by e.g. tcp reset itself. Additionally there exists the othercon
46 * paradigm in lowcomms which cannot easily be changed without breaking backwards
47 * compatibility. A node cannot send anything to another node when a DLM_FIN
48 * message was sent. There exists additional logic to print a warning if
49 * DLM wants to do it. There exists a state handling like RFC 793 but reduced
50 * to termination only. The event "member removal event" describes the cluster
51 * manager removed the node from internal lists, at this point DLM does not
52 * send any message to the other node. There exist two cases:
54 * 1. The cluster member was removed and we received a FIN
56 * 2. We received a FIN but the member was not removed yet
58 * One of these cases will do the CLOSE_WAIT to LAST_ACK change.
64 * | add member/receive RCOM version
72 * +---------+ snd FIN / \ snd ACK +---------+
73 * | FIN |<----------------- ------------------>| CLOSE |
74 * | WAIT-1 |------------------ | WAIT |
75 * +---------+ rcv FIN \ +---------+
76 * | rcv ACK of FIN ------- | CLOSE | member
77 * | -------------- snd ACK | ------- | removal
78 * V x V snd FIN V event
79 * +---------+ +---------+ +---------+
80 * |FINWAIT-2| | CLOSING | | LAST-ACK|
81 * +---------+ +---------+ +---------+
82 * | rcv ACK of FIN | rcv ACK of FIN |
83 * | rcv FIN -------------- | -------------- |
85 * \ snd ACK +---------+ +---------+
86 * ------------------------>| CLOSED | | CLOSED |
87 * +---------+ +---------+
89 * NOTE: any state can interrupted by midcomms_close() and state will be
90 * switched to CLOSED in case of fencing. There exists also some timeout
91 * handling when we receive the version detection RCOM messages which is
92 * made by observation.
94 * Future improvements:
96 * There exists some known issues/improvements of the dlm handling. Some
97 * of them should be done in a next major dlm version bump which makes
98 * it incompatible with previous versions.
100 * Unaligned memory access:
102 * There exists cases when the dlm message buffer length is not aligned
103 * to 8 byte. However seems nobody detected any problem with it. This
104 * can be fixed in the next major version bump of dlm.
108 * The version detection and how it's done is related to backwards
109 * compatibility. There exists better ways to make a better handling.
110 * However this should be changed in the next major version bump of dlm.
112 * Tail Size checking:
114 * There exists a message tail payload in e.g. DLM_MSG however we don't
115 * check it against the message length yet regarding to the receive buffer
116 * length. That need to be validated.
120 * At timeout places or weird sequence number behaviours we should send
121 * a fencing request to the cluster manager.
124 /* Debug switch to enable a 5 seconds sleep waiting of a termination.
125 * This can be useful to test fencing while termination is running.
126 * This requires a setup with only gfs2 as dlm user, so that the
127 * last umount will terminate the connection.
129 * However it became useful to test: while the 5 seconds block in umount,
130 * just press the reset button. In a lot of message-dropping situations the
131 * termination process can take several seconds.
133 #define DLM_DEBUG_FENCE_TERMINATION 0
135 #include <trace/events/dlm.h>
138 #include "dlm_internal.h"
139 #include "lowcomms.h"
144 #include "midcomms.h"
146 /* init value for sequence numbers for testing purpose only e.g. overflows */
147 #define DLM_SEQ_INIT 0
148 /* 5 seconds wait to sync ending of dlm */
149 #define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(5000)
150 #define DLM_VERSION_NOT_SET 0
151 #define DLM_SEND_ACK_BACK_MSG_THRESHOLD 32
152 #define DLM_RECV_ACK_BACK_MSG_THRESHOLD (DLM_SEND_ACK_BACK_MSG_THRESHOLD * 8)
154 struct midcomms_node {
159 /* These queues are unbound because we cannot drop any message in dlm.
160 * We could send a fence signal for a specific node to the cluster
161 * manager if queues hits some maximum value, however this handling
164 struct list_head send_queue;
165 spinlock_t send_queue_lock;
166 atomic_t send_queue_cnt;
167 #define DLM_NODE_FLAG_CLOSE 1
168 #define DLM_NODE_FLAG_STOP_TX 2
169 #define DLM_NODE_FLAG_STOP_RX 3
170 atomic_t ulp_delivered;
172 wait_queue_head_t shutdown_wait;
174 /* dlm tcp termination state */
176 #define DLM_ESTABLISHED 2
177 #define DLM_FIN_WAIT1 3
178 #define DLM_FIN_WAIT2 4
179 #define DLM_CLOSE_WAIT 5
180 #define DLM_LAST_ACK 6
181 #define DLM_CLOSING 7
183 spinlock_t state_lock;
185 /* counts how many lockspaces are using this node
186 * this refcount is necessary to determine if the
187 * node wants to disconnect.
191 /* not protected by srcu, node_hash lifetime */
194 struct hlist_node hlist;
199 const union dlm_packet *inner_p;
200 struct midcomms_node *node;
201 struct dlm_opts *opts;
206 void (*ack_rcv)(struct midcomms_node *node);
208 /* get_mhandle/commit srcu idx exchange */
211 struct list_head list;
215 static struct hlist_head node_hash[CONN_HASH_SIZE];
216 static DEFINE_SPINLOCK(nodes_lock);
217 DEFINE_STATIC_SRCU(nodes_srcu);
219 /* This mutex prevents that midcomms_close() is running while
220 * stop() or remove(). As I experienced invalid memory access
221 * behaviours when DLM_DEBUG_FENCE_TERMINATION is enabled and
222 * resetting machines. I will end in some double deletion in nodes
225 static DEFINE_MUTEX(close_lock);
227 struct kmem_cache *dlm_midcomms_cache_create(void)
229 return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle),
233 static inline const char *dlm_state_str(int state)
238 case DLM_ESTABLISHED:
239 return "ESTABLISHED";
255 const char *dlm_midcomms_state(struct midcomms_node *node)
257 return dlm_state_str(node->state);
260 unsigned long dlm_midcomms_flags(struct midcomms_node *node)
265 int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
267 return atomic_read(&node->send_queue_cnt);
270 uint32_t dlm_midcomms_version(struct midcomms_node *node)
272 return node->version;
275 static struct midcomms_node *__find_node(int nodeid, int r)
277 struct midcomms_node *node;
279 hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
280 if (node->nodeid == nodeid)
287 static void dlm_mhandle_release(struct rcu_head *rcu)
289 struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);
291 dlm_lowcomms_put_msg(mh->msg);
292 dlm_free_mhandle(mh);
295 static void dlm_mhandle_delete(struct midcomms_node *node,
296 struct dlm_mhandle *mh)
298 list_del_rcu(&mh->list);
299 atomic_dec(&node->send_queue_cnt);
300 call_rcu(&mh->rcu, dlm_mhandle_release);
303 static void dlm_send_queue_flush(struct midcomms_node *node)
305 struct dlm_mhandle *mh;
307 pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
310 spin_lock_bh(&node->send_queue_lock);
311 list_for_each_entry_rcu(mh, &node->send_queue, list) {
312 dlm_mhandle_delete(node, mh);
314 spin_unlock_bh(&node->send_queue_lock);
318 static void midcomms_node_reset(struct midcomms_node *node)
320 pr_debug("reset node %d\n", node->nodeid);
322 atomic_set(&node->seq_next, DLM_SEQ_INIT);
323 atomic_set(&node->seq_send, DLM_SEQ_INIT);
324 atomic_set(&node->ulp_delivered, 0);
325 node->version = DLM_VERSION_NOT_SET;
328 dlm_send_queue_flush(node);
329 node->state = DLM_CLOSED;
330 wake_up(&node->shutdown_wait);
333 static struct midcomms_node *nodeid2node(int nodeid)
335 return __find_node(nodeid, nodeid_hash(nodeid));
338 int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
340 int ret, idx, r = nodeid_hash(nodeid);
341 struct midcomms_node *node;
343 ret = dlm_lowcomms_addr(nodeid, addr, len);
347 idx = srcu_read_lock(&nodes_srcu);
348 node = __find_node(nodeid, r);
350 srcu_read_unlock(&nodes_srcu, idx);
353 srcu_read_unlock(&nodes_srcu, idx);
355 node = kmalloc(sizeof(*node), GFP_NOFS);
359 node->nodeid = nodeid;
360 spin_lock_init(&node->state_lock);
361 spin_lock_init(&node->send_queue_lock);
362 atomic_set(&node->send_queue_cnt, 0);
363 INIT_LIST_HEAD(&node->send_queue);
364 init_waitqueue_head(&node->shutdown_wait);
366 midcomms_node_reset(node);
368 spin_lock(&nodes_lock);
369 hlist_add_head_rcu(&node->hlist, &node_hash[r]);
370 spin_unlock(&nodes_lock);
372 node->debugfs = dlm_create_debug_comms_file(nodeid, node);
376 static int dlm_send_ack(int nodeid, uint32_t seq)
378 int mb_len = sizeof(struct dlm_header);
379 struct dlm_header *m_header;
383 msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
388 m_header = (struct dlm_header *)ppc;
390 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
391 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
392 m_header->h_length = cpu_to_le16(mb_len);
393 m_header->h_cmd = DLM_ACK;
394 m_header->u.h_seq = cpu_to_le32(seq);
396 dlm_lowcomms_commit_msg(msg);
397 dlm_lowcomms_put_msg(msg);
402 static void dlm_send_ack_threshold(struct midcomms_node *node,
408 /* let only send one user trigger threshold to send ack back */
410 oval = atomic_read(&node->ulp_delivered);
411 send_ack = (oval > threshold);
412 /* abort if threshold is not reached */
417 /* try to reset ulp_delivered counter */
418 } while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval);
421 dlm_send_ack(node->nodeid, atomic_read(&node->seq_next));
424 static int dlm_send_fin(struct midcomms_node *node,
425 void (*ack_rcv)(struct midcomms_node *node))
427 int mb_len = sizeof(struct dlm_header);
428 struct dlm_header *m_header;
429 struct dlm_mhandle *mh;
432 mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
436 set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
437 mh->ack_rcv = ack_rcv;
439 m_header = (struct dlm_header *)ppc;
441 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
442 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
443 m_header->h_length = cpu_to_le16(mb_len);
444 m_header->h_cmd = DLM_FIN;
446 pr_debug("sending fin msg to node %d\n", node->nodeid);
447 dlm_midcomms_commit_mhandle(mh, NULL, 0);
452 static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
454 struct dlm_mhandle *mh;
457 list_for_each_entry_rcu(mh, &node->send_queue, list) {
458 if (before(mh->seq, seq)) {
462 /* send queue should be ordered */
467 spin_lock_bh(&node->send_queue_lock);
468 list_for_each_entry_rcu(mh, &node->send_queue, list) {
469 if (before(mh->seq, seq)) {
470 dlm_mhandle_delete(node, mh);
472 /* send queue should be ordered */
476 spin_unlock_bh(&node->send_queue_lock);
480 static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
482 spin_lock(&node->state_lock);
483 pr_debug("receive passive fin ack from node %d with state %s\n",
484 node->nodeid, dlm_state_str(node->state));
486 switch (node->state) {
489 midcomms_node_reset(node);
492 /* not valid but somehow we got what we want */
493 wake_up(&node->shutdown_wait);
496 spin_unlock(&node->state_lock);
497 log_print("%s: unexpected state: %d",
498 __func__, node->state);
502 spin_unlock(&node->state_lock);
505 static void dlm_receive_buffer_3_2_trace(uint32_t seq,
506 const union dlm_packet *p)
508 switch (p->header.h_cmd) {
510 trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message);
513 trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom);
520 static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
521 struct midcomms_node *node,
524 bool is_expected_seq;
528 oval = atomic_read(&node->seq_next);
529 is_expected_seq = (oval == seq);
530 if (!is_expected_seq)
534 } while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval);
536 if (is_expected_seq) {
537 switch (p->header.h_cmd) {
539 spin_lock(&node->state_lock);
540 pr_debug("receive fin msg from node %d with state %s\n",
541 node->nodeid, dlm_state_str(node->state));
543 switch (node->state) {
544 case DLM_ESTABLISHED:
545 dlm_send_ack(node->nodeid, nval);
547 /* passive shutdown DLM_LAST_ACK case 1
548 * additional we check if the node is used by
549 * cluster manager events at all.
551 if (node->users == 0) {
552 node->state = DLM_LAST_ACK;
553 pr_debug("switch node %d to state %s case 1\n",
554 node->nodeid, dlm_state_str(node->state));
555 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
556 dlm_send_fin(node, dlm_pas_fin_ack_rcv);
558 node->state = DLM_CLOSE_WAIT;
559 pr_debug("switch node %d to state %s\n",
560 node->nodeid, dlm_state_str(node->state));
564 dlm_send_ack(node->nodeid, nval);
565 node->state = DLM_CLOSING;
566 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
567 pr_debug("switch node %d to state %s\n",
568 node->nodeid, dlm_state_str(node->state));
571 dlm_send_ack(node->nodeid, nval);
572 midcomms_node_reset(node);
573 pr_debug("switch node %d to state %s\n",
574 node->nodeid, dlm_state_str(node->state));
577 /* probably remove_member caught it, do nothing */
580 spin_unlock(&node->state_lock);
581 log_print("%s: unexpected state: %d",
582 __func__, node->state);
586 spin_unlock(&node->state_lock);
589 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
590 dlm_receive_buffer_3_2_trace(seq, p);
591 dlm_receive_buffer(p, node->nodeid);
592 atomic_inc(&node->ulp_delivered);
593 /* unlikely case to send ack back when we don't transmit */
594 dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD);
598 /* retry to ack message which we already have by sending back
599 * current node->seq_next number as ack.
602 dlm_send_ack(node->nodeid, oval);
604 log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
605 seq, oval, node->nodeid);
609 static int dlm_opts_check_msglen(const union dlm_packet *p, uint16_t msglen,
614 /* we only trust outer header msglen because
615 * it's checked against receive buffer length.
617 if (len < sizeof(struct dlm_opts))
619 len -= sizeof(struct dlm_opts);
621 if (len < le16_to_cpu(p->opts.o_optlen))
623 len -= le16_to_cpu(p->opts.o_optlen);
625 switch (p->opts.o_nextcmd) {
627 if (len < sizeof(struct dlm_header)) {
628 log_print("fin too small: %d, will skip this message from node %d",
635 if (len < sizeof(struct dlm_message)) {
636 log_print("msg too small: %d, will skip this message from node %d",
643 if (len < sizeof(struct dlm_rcom)) {
644 log_print("rcom msg too small: %d, will skip this message from node %d",
651 log_print("unsupported o_nextcmd received: %u, will skip this message from node %d",
652 p->opts.o_nextcmd, nodeid);
659 static void dlm_midcomms_receive_buffer_3_2(const union dlm_packet *p, int nodeid)
661 uint16_t msglen = le16_to_cpu(p->header.h_length);
662 struct midcomms_node *node;
666 idx = srcu_read_lock(&nodes_srcu);
667 node = nodeid2node(nodeid);
668 if (WARN_ON_ONCE(!node))
671 switch (node->version) {
672 case DLM_VERSION_NOT_SET:
673 node->version = DLM_VERSION_3_2;
674 wake_up(&node->shutdown_wait);
675 log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
678 spin_lock(&node->state_lock);
679 switch (node->state) {
681 node->state = DLM_ESTABLISHED;
682 pr_debug("switch node %d to state %s\n",
683 node->nodeid, dlm_state_str(node->state));
688 spin_unlock(&node->state_lock);
691 case DLM_VERSION_3_2:
694 log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
695 DLM_VERSION_3_2, node->nodeid, node->version);
699 switch (p->header.h_cmd) {
701 /* these rcom message we use to determine version.
702 * they have their own retransmission handling and
703 * are the first messages of dlm.
705 * length already checked.
707 switch (p->rcom.rc_type) {
708 case cpu_to_le32(DLM_RCOM_NAMES):
710 case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
712 case cpu_to_le32(DLM_RCOM_STATUS):
714 case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
717 log_print("unsupported rcom type received: %u, will skip this message from node %d",
718 le32_to_cpu(p->rcom.rc_type), nodeid);
722 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
723 dlm_receive_buffer(p, nodeid);
726 seq = le32_to_cpu(p->header.u.h_seq);
728 ret = dlm_opts_check_msglen(p, msglen, nodeid);
730 log_print("opts msg too small: %u, will skip this message from node %d",
735 p = (union dlm_packet *)((unsigned char *)p->opts.o_opts +
736 le16_to_cpu(p->opts.o_optlen));
738 /* recheck inner msglen just if it's not garbage */
739 msglen = le16_to_cpu(p->header.h_length);
740 switch (p->header.h_cmd) {
742 if (msglen < sizeof(struct dlm_rcom)) {
743 log_print("inner rcom msg too small: %u, will skip this message from node %d",
750 if (msglen < sizeof(struct dlm_message)) {
751 log_print("inner msg too small: %u, will skip this message from node %d",
758 if (msglen < sizeof(struct dlm_header)) {
759 log_print("inner fin too small: %u, will skip this message from node %d",
766 log_print("unsupported inner h_cmd received: %u, will skip this message from node %d",
771 dlm_midcomms_receive_buffer(p, node, seq);
774 seq = le32_to_cpu(p->header.u.h_seq);
775 dlm_receive_ack(node, seq);
778 log_print("unsupported h_cmd received: %u, will skip this message from node %d",
779 p->header.h_cmd, nodeid);
784 srcu_read_unlock(&nodes_srcu, idx);
787 static void dlm_midcomms_receive_buffer_3_1(const union dlm_packet *p, int nodeid)
789 uint16_t msglen = le16_to_cpu(p->header.h_length);
790 struct midcomms_node *node;
793 idx = srcu_read_lock(&nodes_srcu);
794 node = nodeid2node(nodeid);
795 if (WARN_ON_ONCE(!node)) {
796 srcu_read_unlock(&nodes_srcu, idx);
800 switch (node->version) {
801 case DLM_VERSION_NOT_SET:
802 node->version = DLM_VERSION_3_1;
803 wake_up(&node->shutdown_wait);
804 log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
807 case DLM_VERSION_3_1:
810 log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
811 DLM_VERSION_3_1, node->nodeid, node->version);
812 srcu_read_unlock(&nodes_srcu, idx);
815 srcu_read_unlock(&nodes_srcu, idx);
817 switch (p->header.h_cmd) {
819 /* length already checked */
822 if (msglen < sizeof(struct dlm_message)) {
823 log_print("msg too small: %u, will skip this message from node %d",
830 log_print("unsupported h_cmd received: %u, will skip this message from node %d",
831 p->header.h_cmd, nodeid);
835 dlm_receive_buffer(p, nodeid);
838 int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len)
840 const unsigned char *ptr = buf;
841 const struct dlm_header *hd;
845 while (len >= sizeof(struct dlm_header)) {
846 hd = (struct dlm_header *)ptr;
848 /* no message should be more than DLM_MAX_SOCKET_BUFSIZE or
849 * less than dlm_header size.
851 * Some messages does not have a 8 byte length boundary yet
852 * which can occur in a unaligned memory access of some dlm
853 * messages. However this problem need to be fixed at the
854 * sending side, for now it seems nobody run into architecture
855 * related issues yet but it slows down some processing.
856 * Fixing this issue should be scheduled in future by doing
857 * the next major version bump.
859 msglen = le16_to_cpu(hd->h_length);
860 if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
861 msglen < sizeof(struct dlm_header)) {
862 log_print("received invalid length header: %u from node %d, will abort message parsing",
867 /* caller will take care that leftover
868 * will be parsed next call with more data
882 * Called from the low-level comms layer to process a buffer of
885 int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
887 const unsigned char *ptr = buf;
888 const struct dlm_header *hd;
892 while (len >= sizeof(struct dlm_header)) {
893 hd = (struct dlm_header *)ptr;
895 msglen = le16_to_cpu(hd->h_length);
899 switch (hd->h_version) {
900 case cpu_to_le32(DLM_VERSION_3_1):
901 dlm_midcomms_receive_buffer_3_1((const union dlm_packet *)ptr, nodeid);
903 case cpu_to_le32(DLM_VERSION_3_2):
904 dlm_midcomms_receive_buffer_3_2((const union dlm_packet *)ptr, nodeid);
907 log_print("received invalid version header: %u from node %d, will skip this message",
908 le32_to_cpu(hd->h_version), nodeid);
920 void dlm_midcomms_unack_msg_resend(int nodeid)
922 struct midcomms_node *node;
923 struct dlm_mhandle *mh;
926 idx = srcu_read_lock(&nodes_srcu);
927 node = nodeid2node(nodeid);
928 if (WARN_ON_ONCE(!node)) {
929 srcu_read_unlock(&nodes_srcu, idx);
933 /* old protocol, we don't support to retransmit on failure */
934 switch (node->version) {
935 case DLM_VERSION_3_2:
938 srcu_read_unlock(&nodes_srcu, idx);
943 list_for_each_entry_rcu(mh, &node->send_queue, list) {
947 ret = dlm_lowcomms_resend_msg(mh->msg);
949 log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
950 mh->seq, node->nodeid);
953 srcu_read_unlock(&nodes_srcu, idx);
956 static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
959 opts->o_header.h_cmd = DLM_OPTS;
960 opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
961 opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
962 opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
963 opts->o_header.u.h_seq = cpu_to_le32(seq);
966 static void midcomms_new_msg_cb(void *data)
968 struct dlm_mhandle *mh = data;
970 atomic_inc(&mh->node->send_queue_cnt);
972 spin_lock_bh(&mh->node->send_queue_lock);
973 list_add_tail_rcu(&mh->list, &mh->node->send_queue);
974 spin_unlock_bh(&mh->node->send_queue_lock);
976 mh->seq = atomic_fetch_inc(&mh->node->seq_send);
979 static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
980 int len, gfp_t allocation, char **ppc)
982 struct dlm_opts *opts;
985 msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
986 allocation, ppc, midcomms_new_msg_cb, mh);
990 opts = (struct dlm_opts *)*ppc;
993 /* add possible options here */
994 dlm_fill_opts_header(opts, len, mh->seq);
996 *ppc += sizeof(*opts);
997 mh->inner_p = (const union dlm_packet *)*ppc;
1001 /* avoid false positive for nodes_srcu, unlock happens in
1002 * dlm_midcomms_commit_mhandle which is a must call if success
1005 struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
1006 gfp_t allocation, char **ppc)
1008 struct midcomms_node *node;
1009 struct dlm_mhandle *mh;
1010 struct dlm_msg *msg;
1013 idx = srcu_read_lock(&nodes_srcu);
1014 node = nodeid2node(nodeid);
1015 if (WARN_ON_ONCE(!node))
1018 /* this is a bug, however we going on and hope it will be resolved */
1019 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
1021 mh = dlm_allocate_mhandle(allocation);
1025 mh->committed = false;
1030 switch (node->version) {
1031 case DLM_VERSION_3_1:
1032 msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
1035 dlm_free_mhandle(mh);
1040 case DLM_VERSION_3_2:
1041 /* send ack back if necessary */
1042 dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
1044 msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
1047 dlm_free_mhandle(mh);
1052 dlm_free_mhandle(mh);
1059 /* keep in mind that is a must to call
1060 * dlm_midcomms_commit_msg() which releases
1061 * nodes_srcu using mh->idx which is assumed
1062 * here that the application will call it.
1067 srcu_read_unlock(&nodes_srcu, idx);
1072 static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
1073 const void *name, int namelen)
1075 switch (mh->inner_p->header.h_cmd) {
1077 trace_dlm_send_message(mh->node->nodeid, mh->seq,
1078 &mh->inner_p->message,
1082 trace_dlm_send_rcom(mh->node->nodeid, mh->seq,
1083 &mh->inner_p->rcom);
1086 /* nothing to trace */
1091 static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
1092 const void *name, int namelen)
1094 /* nexthdr chain for fast lookup */
1095 mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
1096 mh->committed = true;
1097 dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
1098 dlm_lowcomms_commit_msg(mh->msg);
1101 /* avoid false positive for nodes_srcu, lock was happen in
1102 * dlm_midcomms_get_mhandle
1105 void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
1106 const void *name, int namelen)
1109 switch (mh->node->version) {
1110 case DLM_VERSION_3_1:
1111 srcu_read_unlock(&nodes_srcu, mh->idx);
1113 dlm_lowcomms_commit_msg(mh->msg);
1114 dlm_lowcomms_put_msg(mh->msg);
1115 /* mh is not part of rcu list in this case */
1116 dlm_free_mhandle(mh);
1118 case DLM_VERSION_3_2:
1119 /* held rcu read lock here, because we sending the
1120 * dlm message out, when we do that we could receive
1121 * an ack back which releases the mhandle and we
1122 * get a use after free.
1125 dlm_midcomms_commit_msg_3_2(mh, name, namelen);
1126 srcu_read_unlock(&nodes_srcu, mh->idx);
1130 srcu_read_unlock(&nodes_srcu, mh->idx);
1137 int dlm_midcomms_start(void)
1139 return dlm_lowcomms_start();
1142 void dlm_midcomms_stop(void)
1144 dlm_lowcomms_stop();
1147 void dlm_midcomms_init(void)
1151 for (i = 0; i < CONN_HASH_SIZE; i++)
1152 INIT_HLIST_HEAD(&node_hash[i]);
1154 dlm_lowcomms_init();
1157 static void midcomms_node_release(struct rcu_head *rcu)
1159 struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
1161 WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
1162 dlm_send_queue_flush(node);
1166 void dlm_midcomms_exit(void)
1168 struct midcomms_node *node;
1171 idx = srcu_read_lock(&nodes_srcu);
1172 for (i = 0; i < CONN_HASH_SIZE; i++) {
1173 hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1174 dlm_delete_debug_comms_file(node->debugfs);
1176 spin_lock(&nodes_lock);
1177 hlist_del_rcu(&node->hlist);
1178 spin_unlock(&nodes_lock);
1180 call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
1183 srcu_read_unlock(&nodes_srcu, idx);
1185 dlm_lowcomms_exit();
1188 static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
1190 spin_lock(&node->state_lock);
1191 pr_debug("receive active fin ack from node %d with state %s\n",
1192 node->nodeid, dlm_state_str(node->state));
1194 switch (node->state) {
1196 node->state = DLM_FIN_WAIT2;
1197 pr_debug("switch node %d to state %s\n",
1198 node->nodeid, dlm_state_str(node->state));
1201 midcomms_node_reset(node);
1202 pr_debug("switch node %d to state %s\n",
1203 node->nodeid, dlm_state_str(node->state));
1206 /* not valid but somehow we got what we want */
1207 wake_up(&node->shutdown_wait);
1210 spin_unlock(&node->state_lock);
1211 log_print("%s: unexpected state: %d",
1212 __func__, node->state);
1216 spin_unlock(&node->state_lock);
1219 void dlm_midcomms_add_member(int nodeid)
1221 struct midcomms_node *node;
1224 idx = srcu_read_lock(&nodes_srcu);
1225 node = nodeid2node(nodeid);
1226 if (WARN_ON_ONCE(!node)) {
1227 srcu_read_unlock(&nodes_srcu, idx);
1231 spin_lock(&node->state_lock);
1233 pr_debug("receive add member from node %d with state %s\n",
1234 node->nodeid, dlm_state_str(node->state));
1235 switch (node->state) {
1236 case DLM_ESTABLISHED:
1239 node->state = DLM_ESTABLISHED;
1240 pr_debug("switch node %d to state %s\n",
1241 node->nodeid, dlm_state_str(node->state));
1244 /* some invalid state passive shutdown
1245 * was failed, we try to reset and
1246 * hope it will go on.
1248 log_print("reset node %d because shutdown stuck",
1251 midcomms_node_reset(node);
1252 node->state = DLM_ESTABLISHED;
1258 pr_debug("node %d users inc count %d\n", nodeid, node->users);
1259 spin_unlock(&node->state_lock);
1261 srcu_read_unlock(&nodes_srcu, idx);
1264 void dlm_midcomms_remove_member(int nodeid)
1266 struct midcomms_node *node;
1269 idx = srcu_read_lock(&nodes_srcu);
1270 node = nodeid2node(nodeid);
1271 /* in case of dlm_midcomms_close() removes node */
1273 srcu_read_unlock(&nodes_srcu, idx);
1277 spin_lock(&node->state_lock);
1278 /* case of dlm_midcomms_addr() created node but
1279 * was not added before because dlm_midcomms_close()
1283 spin_unlock(&node->state_lock);
1284 srcu_read_unlock(&nodes_srcu, idx);
1289 pr_debug("node %d users dec count %d\n", nodeid, node->users);
1291 /* hitting users count to zero means the
1292 * other side is running dlm_midcomms_stop()
1293 * we meet us to have a clean disconnect.
1295 if (node->users == 0) {
1296 pr_debug("receive remove member from node %d with state %s\n",
1297 node->nodeid, dlm_state_str(node->state));
1298 switch (node->state) {
1299 case DLM_ESTABLISHED:
1301 case DLM_CLOSE_WAIT:
1302 /* passive shutdown DLM_LAST_ACK case 2 */
1303 node->state = DLM_LAST_ACK;
1304 pr_debug("switch node %d to state %s case 2\n",
1305 node->nodeid, dlm_state_str(node->state));
1306 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
1307 dlm_send_fin(node, dlm_pas_fin_ack_rcv);
1310 /* probably receive fin caught it, do nothing */
1313 /* already gone, do nothing */
1316 log_print("%s: unexpected state: %d",
1317 __func__, node->state);
1321 spin_unlock(&node->state_lock);
1323 srcu_read_unlock(&nodes_srcu, idx);
1326 void dlm_midcomms_version_wait(void)
1328 struct midcomms_node *node;
1331 idx = srcu_read_lock(&nodes_srcu);
1332 for (i = 0; i < CONN_HASH_SIZE; i++) {
1333 hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1334 ret = wait_event_timeout(node->shutdown_wait,
1335 node->version != DLM_VERSION_NOT_SET ||
1336 node->state == DLM_CLOSED ||
1337 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
1338 DLM_SHUTDOWN_TIMEOUT);
1339 if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags))
1340 pr_debug("version wait timed out for node %d with state %s\n",
1341 node->nodeid, dlm_state_str(node->state));
1344 srcu_read_unlock(&nodes_srcu, idx);
1347 static void midcomms_shutdown(struct midcomms_node *node)
1351 /* old protocol, we don't wait for pending operations */
1352 switch (node->version) {
1353 case DLM_VERSION_3_2:
1359 spin_lock(&node->state_lock);
1360 pr_debug("receive active shutdown for node %d with state %s\n",
1361 node->nodeid, dlm_state_str(node->state));
1362 switch (node->state) {
1363 case DLM_ESTABLISHED:
1364 node->state = DLM_FIN_WAIT1;
1365 pr_debug("switch node %d to state %s case 2\n",
1366 node->nodeid, dlm_state_str(node->state));
1367 dlm_send_fin(node, dlm_act_fin_ack_rcv);
1370 /* we have what we want */
1373 /* busy to enter DLM_FIN_WAIT1, wait until passive
1374 * done in shutdown_wait to enter DLM_CLOSED.
1378 spin_unlock(&node->state_lock);
1380 if (DLM_DEBUG_FENCE_TERMINATION)
1383 /* wait for other side dlm + fin */
1384 ret = wait_event_timeout(node->shutdown_wait,
1385 node->state == DLM_CLOSED ||
1386 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
1387 DLM_SHUTDOWN_TIMEOUT);
1389 pr_debug("active shutdown timed out for node %d with state %s\n",
1390 node->nodeid, dlm_state_str(node->state));
1392 pr_debug("active shutdown done for node %d with state %s\n",
1393 node->nodeid, dlm_state_str(node->state));
1396 void dlm_midcomms_shutdown(void)
1398 struct midcomms_node *node;
1401 mutex_lock(&close_lock);
1402 idx = srcu_read_lock(&nodes_srcu);
1403 for (i = 0; i < CONN_HASH_SIZE; i++) {
1404 hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1405 midcomms_shutdown(node);
1409 dlm_lowcomms_shutdown();
1411 for (i = 0; i < CONN_HASH_SIZE; i++) {
1412 hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1413 midcomms_node_reset(node);
1416 srcu_read_unlock(&nodes_srcu, idx);
1417 mutex_unlock(&close_lock);
1420 int dlm_midcomms_close(int nodeid)
1422 struct midcomms_node *node;
1425 idx = srcu_read_lock(&nodes_srcu);
1426 /* Abort pending close/remove operation */
1427 node = nodeid2node(nodeid);
1429 /* let shutdown waiters leave */
1430 set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
1431 wake_up(&node->shutdown_wait);
1433 srcu_read_unlock(&nodes_srcu, idx);
1435 synchronize_srcu(&nodes_srcu);
1437 mutex_lock(&close_lock);
1438 idx = srcu_read_lock(&nodes_srcu);
1439 node = nodeid2node(nodeid);
1441 srcu_read_unlock(&nodes_srcu, idx);
1442 mutex_unlock(&close_lock);
1443 return dlm_lowcomms_close(nodeid);
1446 ret = dlm_lowcomms_close(nodeid);
1447 dlm_delete_debug_comms_file(node->debugfs);
1449 spin_lock(&nodes_lock);
1450 hlist_del_rcu(&node->hlist);
1451 spin_unlock(&nodes_lock);
1452 srcu_read_unlock(&nodes_srcu, idx);
1454 /* wait that all readers left until flush send queue */
1455 synchronize_srcu(&nodes_srcu);
1457 /* drop all pending dlm messages, this is fine as
1458 * this function get called when the node is fenced
1460 dlm_send_queue_flush(node);
1462 call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
1463 mutex_unlock(&close_lock);
1468 /* debug functionality to send raw dlm msg from user space */
1469 struct dlm_rawmsg_data {
1470 struct midcomms_node *node;
1474 static void midcomms_new_rawmsg_cb(void *data)
1476 struct dlm_rawmsg_data *rd = data;
1477 struct dlm_header *h = rd->buf;
1479 switch (h->h_version) {
1480 case cpu_to_le32(DLM_VERSION_3_1):
1486 h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send));
1495 int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
1498 struct dlm_rawmsg_data rd;
1499 struct dlm_msg *msg;
1505 msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS,
1506 &msgbuf, midcomms_new_rawmsg_cb, &rd);
1510 memcpy(msgbuf, buf, buflen);
1511 dlm_lowcomms_commit_msg(msg);