1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2021 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
15 * This is the appallingly named "mid-level" comms layer. It takes care of
16 * delivering, at the application layer, "reliable" communication above the
17 * lowcomms transport layer that is used.
21 * Each node keeps track of all sent DLM messages in send_queue with a sequence
22 * number. The receiver will send a DLM_ACK message back for every DLM message
23 * received at the other side. If a reconnect happens in lowcomms we will send
24 * all unacknowledged dlm messages again. The receiving side might drop any already
25 * received message by comparing sequence numbers.
27 * How version detection works:
29 * Due to the fact that dlm has pre-configured node addresses on every side
30 * it is in its nature that every side connects at start to transmit
31 * dlm messages which ends in a race. However DLM_RCOM_NAMES, DLM_RCOM_STATUS
32 * and their replies are the first messages which are exchanged. Due to backwards
33 * compatibility these messages are not covered by the midcomms re-transmission
34 * layer. These messages have their own re-transmission handling in the dlm
35 * application layer. The version field of every node will be set on these RCOM
36 * messages as soon as they arrive and the node isn't yet part of the nodes
37 * hash. There also exists logic to detect version mismatches if something weird
38 * is going on or the first message isn't an expected one.
42 * The midcomms layer does a 4 way handshake for termination on DLM protocol
43 * like TCP supports it with half-closed socket support. SCTP doesn't support
44 * half-closed socket, so we do it on DLM layer. Also socket shutdown() can be
45 * interrupted by e.g. a tcp reset itself. Additionally there exists the othercon
46 * paradigm in lowcomms which cannot easily be removed without breaking backwards
47 * compatibility. A node cannot send anything to another node when a DLM_FIN
48 * message was sent. There exists additional logic to print a warning if
49 * DLM wants to do it. There exists a state handling like RFC 793 but reduced
50 * to termination only. The event "member removal event" describes the cluster
51 * manager removed the node from internal lists, at this point DLM does not
52 * send any message to the other node. There exists two cases:
54 * 1. The cluster member was removed and we received a FIN
56 * 2. We received a FIN but the member was not removed yet
58 * One of these cases will do the CLOSE_WAIT to LAST_ACK change.
64 * | add member/receive RCOM version
72 * +---------+ snd FIN / \ snd ACK +---------+
73 * | FIN |<----------------- ------------------>| CLOSE |
74 * | WAIT-1 |------------------ | WAIT |
75 * +---------+ rcv FIN \ +---------+
76 * | rcv ACK of FIN ------- | CLOSE | member
77 * | -------------- snd ACK | ------- | removal
78 * V x V snd FIN V event
79 * +---------+ +---------+ +---------+
80 * |FINWAIT-2| | CLOSING | | LAST-ACK|
81 * +---------+ +---------+ +---------+
82 * | rcv ACK of FIN | rcv ACK of FIN |
83 * | rcv FIN -------------- | -------------- |
85 * \ snd ACK +---------+ +---------+
86 * ------------------------>| CLOSED | | CLOSED |
87 * +---------+ +---------+
89 * NOTE: any state can interrupted by midcomms_close() and state will be
90 * switched to CLOSED in case of fencing. There exists also some timeout
91 * handling when we receive the version detection RCOM messages which is
92 * made by observation.
94 * Future improvements:
96 * There exists some known issues/improvements of the dlm handling. Some
97 * of them should be done in a next major dlm version bump which makes
98 * it incompatible with previous versions.
100 * Unaligned memory access:
102 * There exists cases when the dlm message buffer length is not aligned
103 * to 8 byte. However seems nobody detected any problem with it. This
104 * can be fixed in the next major version bump of dlm.
108 * The version detection and how it's done is related to backwards
109 * compatibility. There exists better ways to make a better handling.
110 * However this should be changed in the next major version bump of dlm.
112 * Tail Size checking:
114 * There exists a message tail payload in e.g. DLM_MSG however we don't
115 * check it against the message length yet regarding to the receive buffer
116 * length. That need to be validated.
120 * At timeout places or weird sequence number behaviours we should send
121 * a fencing request to the cluster manager.
124 /* Debug switch to enable a 5 seconds sleep waiting of a termination.
125 * This can be useful to test fencing while termination is running.
126 * This requires a setup with only gfs2 as dlm user, so that the
127 * last umount will terminate the connection.
129 * However it became useful for testing: while the 5 seconds block in umount,
130 * just press the reset button. In case of a lot of message drops the
131 * termination process can take several seconds.
133 #define DLM_DEBUG_FENCE_TERMINATION 0
135 #include <trace/events/dlm.h>
138 #include "dlm_internal.h"
139 #include "lowcomms.h"
144 #include "midcomms.h"
146 /* init value for sequence numbers for testing purpose only e.g. overflows */
147 #define DLM_SEQ_INIT 0
148 /* 3 minutes wait to sync ending of dlm */
149 #define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(3 * 60 * 1000)
150 #define DLM_VERSION_NOT_SET 0
152 struct midcomms_node {
157 /* These queues are unbound because we cannot drop any message in dlm.
158 * We could send a fence signal for a specific node to the cluster
159 * manager if queues hits some maximum value, however this handling
162 struct list_head send_queue;
163 spinlock_t send_queue_lock;
164 atomic_t send_queue_cnt;
165 #define DLM_NODE_FLAG_CLOSE 1
166 #define DLM_NODE_FLAG_STOP_TX 2
167 #define DLM_NODE_FLAG_STOP_RX 3
168 #define DLM_NODE_ULP_DELIVERED 4
170 wait_queue_head_t shutdown_wait;
172 /* dlm tcp termination state */
174 #define DLM_ESTABLISHED 2
175 #define DLM_FIN_WAIT1 3
176 #define DLM_FIN_WAIT2 4
177 #define DLM_CLOSE_WAIT 5
178 #define DLM_LAST_ACK 6
179 #define DLM_CLOSING 7
181 spinlock_t state_lock;
183 /* counts how many lockspaces are using this node
184 * this refcount is necessary to determine if the
185 * node wants to disconnect.
189 /* not protected by srcu, node_hash lifetime */
192 struct hlist_node hlist;
197 const union dlm_packet *inner_p;
198 struct midcomms_node *node;
199 struct dlm_opts *opts;
204 void (*ack_rcv)(struct midcomms_node *node);
206 /* get_mhandle/commit srcu idx exchange */
209 struct list_head list;
213 static struct hlist_head node_hash[CONN_HASH_SIZE];
214 static DEFINE_SPINLOCK(nodes_lock);
215 DEFINE_STATIC_SRCU(nodes_srcu);
217 /* This mutex prevents midcomms_close() from running while
218 * stop() or remove() is running. I experienced invalid memory access
219 * behaviours when DLM_DEBUG_FENCE_TERMINATION is enabled and
220 * machines are being reset. It would end in some double deletion in nodes
223 static DEFINE_MUTEX(close_lock);
225 struct kmem_cache *dlm_midcomms_cache_create(void)
227 return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle),
231 static inline const char *dlm_state_str(int state)
236 case DLM_ESTABLISHED:
237 return "ESTABLISHED";
253 const char *dlm_midcomms_state(struct midcomms_node *node)
255 return dlm_state_str(node->state);
258 unsigned long dlm_midcomms_flags(struct midcomms_node *node)
263 int dlm_midcomms_send_queue_cnt(struct midcomms_node *node)
265 return atomic_read(&node->send_queue_cnt);
268 uint32_t dlm_midcomms_version(struct midcomms_node *node)
270 return node->version;
273 static struct midcomms_node *__find_node(int nodeid, int r)
275 struct midcomms_node *node;
277 hlist_for_each_entry_rcu(node, &node_hash[r], hlist) {
278 if (node->nodeid == nodeid)
285 static void dlm_mhandle_release(struct rcu_head *rcu)
287 struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu);
289 dlm_lowcomms_put_msg(mh->msg);
290 dlm_free_mhandle(mh);
293 static void dlm_mhandle_delete(struct midcomms_node *node,
294 struct dlm_mhandle *mh)
296 list_del_rcu(&mh->list);
297 atomic_dec(&node->send_queue_cnt);
298 call_rcu(&mh->rcu, dlm_mhandle_release);
301 static void dlm_send_queue_flush(struct midcomms_node *node)
303 struct dlm_mhandle *mh;
305 pr_debug("flush midcomms send queue of node %d\n", node->nodeid);
308 spin_lock(&node->send_queue_lock);
309 list_for_each_entry_rcu(mh, &node->send_queue, list) {
310 dlm_mhandle_delete(node, mh);
312 spin_unlock(&node->send_queue_lock);
316 static void midcomms_node_reset(struct midcomms_node *node)
318 pr_debug("reset node %d\n", node->nodeid);
320 node->seq_next = DLM_SEQ_INIT;
321 node->seq_send = DLM_SEQ_INIT;
322 node->version = DLM_VERSION_NOT_SET;
325 dlm_send_queue_flush(node);
326 node->state = DLM_CLOSED;
327 wake_up(&node->shutdown_wait);
330 static struct midcomms_node *nodeid2node(int nodeid, gfp_t alloc)
332 struct midcomms_node *node, *tmp;
333 int r = nodeid_hash(nodeid);
335 node = __find_node(nodeid, r);
339 node = kmalloc(sizeof(*node), alloc);
343 node->nodeid = nodeid;
344 spin_lock_init(&node->state_lock);
345 spin_lock_init(&node->send_queue_lock);
346 atomic_set(&node->send_queue_cnt, 0);
347 INIT_LIST_HEAD(&node->send_queue);
348 init_waitqueue_head(&node->shutdown_wait);
350 midcomms_node_reset(node);
352 spin_lock(&nodes_lock);
353 /* check again if there was somebody else
354 * earlier here to add the node
356 tmp = __find_node(nodeid, r);
358 spin_unlock(&nodes_lock);
363 hlist_add_head_rcu(&node->hlist, &node_hash[r]);
364 spin_unlock(&nodes_lock);
366 node->debugfs = dlm_create_debug_comms_file(nodeid, node);
370 static int dlm_send_ack(int nodeid, uint32_t seq)
372 int mb_len = sizeof(struct dlm_header);
373 struct dlm_header *m_header;
377 msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
382 m_header = (struct dlm_header *)ppc;
384 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
385 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
386 m_header->h_length = cpu_to_le16(mb_len);
387 m_header->h_cmd = DLM_ACK;
388 m_header->u.h_seq = cpu_to_le32(seq);
390 dlm_lowcomms_commit_msg(msg);
391 dlm_lowcomms_put_msg(msg);
396 static int dlm_send_fin(struct midcomms_node *node,
397 void (*ack_rcv)(struct midcomms_node *node))
399 int mb_len = sizeof(struct dlm_header);
400 struct dlm_header *m_header;
401 struct dlm_mhandle *mh;
404 mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
408 set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
409 mh->ack_rcv = ack_rcv;
411 m_header = (struct dlm_header *)ppc;
413 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
414 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
415 m_header->h_length = cpu_to_le16(mb_len);
416 m_header->h_cmd = DLM_FIN;
418 pr_debug("sending fin msg to node %d\n", node->nodeid);
419 dlm_midcomms_commit_mhandle(mh, NULL, 0);
424 static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
426 struct dlm_mhandle *mh;
429 list_for_each_entry_rcu(mh, &node->send_queue, list) {
430 if (before(mh->seq, seq)) {
434 /* send queue should be ordered */
439 spin_lock(&node->send_queue_lock);
440 list_for_each_entry_rcu(mh, &node->send_queue, list) {
441 if (before(mh->seq, seq)) {
442 dlm_mhandle_delete(node, mh);
444 /* send queue should be ordered */
448 spin_unlock(&node->send_queue_lock);
452 static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
454 spin_lock(&node->state_lock);
455 pr_debug("receive passive fin ack from node %d with state %s\n",
456 node->nodeid, dlm_state_str(node->state));
458 switch (node->state) {
461 midcomms_node_reset(node);
464 /* not valid but somehow we got what we want */
465 wake_up(&node->shutdown_wait);
468 spin_unlock(&node->state_lock);
469 log_print("%s: unexpected state: %d\n",
470 __func__, node->state);
474 spin_unlock(&node->state_lock);
477 static void dlm_receive_buffer_3_2_trace(uint32_t seq, union dlm_packet *p)
479 switch (p->header.h_cmd) {
481 trace_dlm_recv_message(seq, &p->message);
484 trace_dlm_recv_rcom(seq, &p->rcom);
491 static void dlm_midcomms_receive_buffer(union dlm_packet *p,
492 struct midcomms_node *node,
495 if (seq == node->seq_next) {
498 switch (p->header.h_cmd) {
500 spin_lock(&node->state_lock);
501 pr_debug("receive fin msg from node %d with state %s\n",
502 node->nodeid, dlm_state_str(node->state));
504 switch (node->state) {
505 case DLM_ESTABLISHED:
506 dlm_send_ack(node->nodeid, node->seq_next);
508 node->state = DLM_CLOSE_WAIT;
509 pr_debug("switch node %d to state %s\n",
510 node->nodeid, dlm_state_str(node->state));
511 /* passive shutdown DLM_LAST_ACK case 1
512 * additional we check if the node is used by
513 * cluster manager events at all.
515 if (node->users == 0) {
516 node->state = DLM_LAST_ACK;
517 pr_debug("switch node %d to state %s case 1\n",
518 node->nodeid, dlm_state_str(node->state));
519 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
520 dlm_send_fin(node, dlm_pas_fin_ack_rcv);
524 dlm_send_ack(node->nodeid, node->seq_next);
525 node->state = DLM_CLOSING;
526 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
527 pr_debug("switch node %d to state %s\n",
528 node->nodeid, dlm_state_str(node->state));
531 dlm_send_ack(node->nodeid, node->seq_next);
532 midcomms_node_reset(node);
533 pr_debug("switch node %d to state %s\n",
534 node->nodeid, dlm_state_str(node->state));
535 wake_up(&node->shutdown_wait);
538 /* probably remove_member caught it, do nothing */
541 spin_unlock(&node->state_lock);
542 log_print("%s: unexpected state: %d\n",
543 __func__, node->state);
547 spin_unlock(&node->state_lock);
550 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
551 dlm_receive_buffer_3_2_trace(seq, p);
552 dlm_receive_buffer(p, node->nodeid);
553 set_bit(DLM_NODE_ULP_DELIVERED, &node->flags);
557 /* retry to ack message which we already have by sending back
558 * current node->seq_next number as ack.
560 if (seq < node->seq_next)
561 dlm_send_ack(node->nodeid, node->seq_next);
563 log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
564 seq, node->seq_next, node->nodeid);
568 static struct midcomms_node *
569 dlm_midcomms_recv_node_lookup(int nodeid, const union dlm_packet *p,
570 uint16_t msglen, int (*cb)(struct midcomms_node *node))
572 struct midcomms_node *node = NULL;
573 gfp_t allocation = 0;
576 switch (p->header.h_cmd) {
578 if (msglen < sizeof(struct dlm_rcom)) {
579 log_print("rcom msg too small: %u, will skip this message from node %d",
584 switch (p->rcom.rc_type) {
585 case cpu_to_le32(DLM_RCOM_NAMES):
587 case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
589 case cpu_to_le32(DLM_RCOM_STATUS):
591 case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
592 node = nodeid2node(nodeid, 0);
594 spin_lock(&node->state_lock);
595 if (node->state != DLM_ESTABLISHED)
596 pr_debug("receive begin RCOM msg from node %d with state %s\n",
597 node->nodeid, dlm_state_str(node->state));
599 switch (node->state) {
601 node->state = DLM_ESTABLISHED;
602 pr_debug("switch node %d to state %s\n",
603 node->nodeid, dlm_state_str(node->state));
605 case DLM_ESTABLISHED:
608 /* some invalid state passive shutdown
609 * was failed, we try to reset and
610 * hope it will go on.
612 log_print("reset node %d because shutdown stuck",
615 midcomms_node_reset(node);
616 node->state = DLM_ESTABLISHED;
619 spin_unlock(&node->state_lock);
622 allocation = GFP_NOFS;
633 node = nodeid2node(nodeid, allocation);
635 switch (p->header.h_cmd) {
637 if (msglen < sizeof(struct dlm_opts)) {
638 log_print("opts msg too small: %u, will skip this message from node %d",
643 log_print_ratelimited("received dlm opts message nextcmd %d from node %d in an invalid sequence",
644 p->opts.o_nextcmd, nodeid);
647 log_print_ratelimited("received dlm message cmd %d from node %d in an invalid sequence",
648 p->header.h_cmd, nodeid);
662 static int dlm_midcomms_version_check_3_2(struct midcomms_node *node)
664 switch (node->version) {
665 case DLM_VERSION_NOT_SET:
666 node->version = DLM_VERSION_3_2;
667 log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2,
670 case DLM_VERSION_3_2:
673 log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
674 DLM_VERSION_3_2, node->nodeid, node->version);
681 static int dlm_opts_check_msglen(union dlm_packet *p, uint16_t msglen, int nodeid)
685 /* we only trust outer header msglen because
686 * it's checked against receive buffer length.
688 if (len < sizeof(struct dlm_opts))
690 len -= sizeof(struct dlm_opts);
692 if (len < le16_to_cpu(p->opts.o_optlen))
694 len -= le16_to_cpu(p->opts.o_optlen);
696 switch (p->opts.o_nextcmd) {
698 if (len < sizeof(struct dlm_header)) {
699 log_print("fin too small: %d, will skip this message from node %d",
706 if (len < sizeof(struct dlm_message)) {
707 log_print("msg too small: %d, will skip this message from node %d",
714 if (len < sizeof(struct dlm_rcom)) {
715 log_print("rcom msg too small: %d, will skip this message from node %d",
722 log_print("unsupported o_nextcmd received: %u, will skip this message from node %d",
723 p->opts.o_nextcmd, nodeid);
730 static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
732 uint16_t msglen = le16_to_cpu(p->header.h_length);
733 struct midcomms_node *node;
737 idx = srcu_read_lock(&nodes_srcu);
738 node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
739 dlm_midcomms_version_check_3_2);
743 switch (p->header.h_cmd) {
745 /* these rcom message we use to determine version.
746 * they have their own retransmission handling and
747 * are the first messages of dlm.
749 * length already checked.
751 switch (p->rcom.rc_type) {
752 case cpu_to_le32(DLM_RCOM_NAMES):
754 case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
756 case cpu_to_le32(DLM_RCOM_STATUS):
758 case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
761 log_print("unsupported rcom type received: %u, will skip this message from node %d",
762 le32_to_cpu(p->rcom.rc_type), nodeid);
766 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
767 dlm_receive_buffer(p, nodeid);
770 seq = le32_to_cpu(p->header.u.h_seq);
772 ret = dlm_opts_check_msglen(p, msglen, nodeid);
774 log_print("opts msg too small: %u, will skip this message from node %d",
779 p = (union dlm_packet *)((unsigned char *)p->opts.o_opts +
780 le16_to_cpu(p->opts.o_optlen));
782 /* recheck inner msglen just if it's not garbage */
783 msglen = le16_to_cpu(p->header.h_length);
784 switch (p->header.h_cmd) {
786 if (msglen < sizeof(struct dlm_rcom)) {
787 log_print("inner rcom msg too small: %u, will skip this message from node %d",
794 if (msglen < sizeof(struct dlm_message)) {
795 log_print("inner msg too small: %u, will skip this message from node %d",
802 if (msglen < sizeof(struct dlm_header)) {
803 log_print("inner fin too small: %u, will skip this message from node %d",
810 log_print("unsupported inner h_cmd received: %u, will skip this message from node %d",
815 dlm_midcomms_receive_buffer(p, node, seq);
818 seq = le32_to_cpu(p->header.u.h_seq);
819 dlm_receive_ack(node, seq);
822 log_print("unsupported h_cmd received: %u, will skip this message from node %d",
823 p->header.h_cmd, nodeid);
828 srcu_read_unlock(&nodes_srcu, idx);
831 static int dlm_midcomms_version_check_3_1(struct midcomms_node *node)
833 switch (node->version) {
834 case DLM_VERSION_NOT_SET:
835 node->version = DLM_VERSION_3_1;
836 log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1,
839 case DLM_VERSION_3_1:
842 log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x",
843 DLM_VERSION_3_1, node->nodeid, node->version);
850 static void dlm_midcomms_receive_buffer_3_1(union dlm_packet *p, int nodeid)
852 uint16_t msglen = le16_to_cpu(p->header.h_length);
853 struct midcomms_node *node;
856 idx = srcu_read_lock(&nodes_srcu);
857 node = dlm_midcomms_recv_node_lookup(nodeid, p, msglen,
858 dlm_midcomms_version_check_3_1);
860 srcu_read_unlock(&nodes_srcu, idx);
863 srcu_read_unlock(&nodes_srcu, idx);
865 switch (p->header.h_cmd) {
867 /* length already checked */
870 if (msglen < sizeof(struct dlm_message)) {
871 log_print("msg too small: %u, will skip this message from node %d",
878 log_print("unsupported h_cmd received: %u, will skip this message from node %d",
879 p->header.h_cmd, nodeid);
883 dlm_receive_buffer(p, nodeid);
887 * Called from the low-level comms layer to process a buffer of
891 int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
893 const unsigned char *ptr = buf;
894 const struct dlm_header *hd;
898 while (len >= sizeof(struct dlm_header)) {
899 hd = (struct dlm_header *)ptr;
901 /* no message should be more than DLM_MAX_SOCKET_BUFSIZE or
902 * less than dlm_header size.
904 * Some messages does not have a 8 byte length boundary yet
905 * which can occur in a unaligned memory access of some dlm
906 * messages. However this problem need to be fixed at the
907 * sending side, for now it seems nobody run into architecture
908 * related issues yet but it slows down some processing.
909 * Fixing this issue should be scheduled in future by doing
910 * the next major version bump.
912 msglen = le16_to_cpu(hd->h_length);
913 if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
914 msglen < sizeof(struct dlm_header)) {
915 log_print("received invalid length header: %u from node %d, will abort message parsing",
920 /* caller will take care that leftover
921 * will be parsed next call with more data
926 switch (hd->h_version) {
927 case cpu_to_le32(DLM_VERSION_3_1):
928 dlm_midcomms_receive_buffer_3_1((union dlm_packet *)ptr, nodeid);
930 case cpu_to_le32(DLM_VERSION_3_2):
931 dlm_midcomms_receive_buffer_3_2((union dlm_packet *)ptr, nodeid);
934 log_print("received invalid version header: %u from node %d, will skip this message",
935 le32_to_cpu(hd->h_version), nodeid);
947 void dlm_midcomms_receive_done(int nodeid)
949 struct midcomms_node *node;
952 idx = srcu_read_lock(&nodes_srcu);
953 node = nodeid2node(nodeid, 0);
955 srcu_read_unlock(&nodes_srcu, idx);
959 /* old protocol, we do nothing */
960 switch (node->version) {
961 case DLM_VERSION_3_2:
964 srcu_read_unlock(&nodes_srcu, idx);
968 /* do nothing if we didn't delivered stateful to ulp */
969 if (!test_and_clear_bit(DLM_NODE_ULP_DELIVERED,
971 srcu_read_unlock(&nodes_srcu, idx);
975 spin_lock(&node->state_lock);
976 /* we only ack if state is ESTABLISHED */
977 switch (node->state) {
978 case DLM_ESTABLISHED:
979 spin_unlock(&node->state_lock);
980 dlm_send_ack(node->nodeid, node->seq_next);
983 spin_unlock(&node->state_lock);
984 /* do nothing FIN has it's own ack send */
987 srcu_read_unlock(&nodes_srcu, idx);
990 void dlm_midcomms_unack_msg_resend(int nodeid)
992 struct midcomms_node *node;
993 struct dlm_mhandle *mh;
996 idx = srcu_read_lock(&nodes_srcu);
997 node = nodeid2node(nodeid, 0);
999 srcu_read_unlock(&nodes_srcu, idx);
1003 /* old protocol, we don't support to retransmit on failure */
1004 switch (node->version) {
1005 case DLM_VERSION_3_2:
1008 srcu_read_unlock(&nodes_srcu, idx);
1013 list_for_each_entry_rcu(mh, &node->send_queue, list) {
1017 ret = dlm_lowcomms_resend_msg(mh->msg);
1019 log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
1020 mh->seq, node->nodeid);
1023 srcu_read_unlock(&nodes_srcu, idx);
1026 static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
1029 opts->o_header.h_cmd = DLM_OPTS;
1030 opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
1031 opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
1032 opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
1033 opts->o_header.u.h_seq = cpu_to_le32(seq);
1036 static void midcomms_new_msg_cb(void *data)
1038 struct dlm_mhandle *mh = data;
1040 atomic_inc(&mh->node->send_queue_cnt);
1042 spin_lock(&mh->node->send_queue_lock);
1043 list_add_tail_rcu(&mh->list, &mh->node->send_queue);
1044 spin_unlock(&mh->node->send_queue_lock);
1046 mh->seq = mh->node->seq_send++;
1049 static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
1050 int len, gfp_t allocation, char **ppc)
1052 struct dlm_opts *opts;
1053 struct dlm_msg *msg;
1055 msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
1056 allocation, ppc, midcomms_new_msg_cb, mh);
1060 opts = (struct dlm_opts *)*ppc;
1063 /* add possible options here */
1064 dlm_fill_opts_header(opts, len, mh->seq);
1066 *ppc += sizeof(*opts);
1067 mh->inner_p = (const union dlm_packet *)*ppc;
1071 /* avoid false positive for nodes_srcu, unlock happens in
1072 * dlm_midcomms_commit_mhandle which is a must call if success
1075 struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
1076 gfp_t allocation, char **ppc)
1078 struct midcomms_node *node;
1079 struct dlm_mhandle *mh;
1080 struct dlm_msg *msg;
1083 idx = srcu_read_lock(&nodes_srcu);
1084 node = nodeid2node(nodeid, 0);
1090 /* this is a bug, however we going on and hope it will be resolved */
1091 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
1093 mh = dlm_allocate_mhandle();
1097 mh->committed = false;
1102 switch (node->version) {
1103 case DLM_VERSION_3_1:
1104 msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
1107 dlm_free_mhandle(mh);
1112 case DLM_VERSION_3_2:
1113 msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
1116 dlm_free_mhandle(mh);
1122 dlm_free_mhandle(mh);
1129 /* keep in mind that is a must to call
1130 * dlm_midcomms_commit_msg() which releases
1131 * nodes_srcu using mh->idx which is assumed
1132 * here that the application will call it.
1137 srcu_read_unlock(&nodes_srcu, idx);
1142 static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
1143 const void *name, int namelen)
1145 switch (mh->inner_p->header.h_cmd) {
1147 trace_dlm_send_message(mh->seq, &mh->inner_p->message,
1151 trace_dlm_send_rcom(mh->seq, &mh->inner_p->rcom);
1154 /* nothing to trace */
1159 static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
1160 const void *name, int namelen)
1162 /* nexthdr chain for fast lookup */
1163 mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
1164 mh->committed = true;
1165 dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
1166 dlm_lowcomms_commit_msg(mh->msg);
1169 /* avoid false positive for nodes_srcu, lock was happen in
1170 * dlm_midcomms_get_mhandle
1173 void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
1174 const void *name, int namelen)
1177 switch (mh->node->version) {
1178 case DLM_VERSION_3_1:
1179 srcu_read_unlock(&nodes_srcu, mh->idx);
1181 dlm_lowcomms_commit_msg(mh->msg);
1182 dlm_lowcomms_put_msg(mh->msg);
1183 /* mh is not part of rcu list in this case */
1184 dlm_free_mhandle(mh);
1186 case DLM_VERSION_3_2:
1187 /* held rcu read lock here, because we sending the
1188 * dlm message out, when we do that we could receive
1189 * an ack back which releases the mhandle and we
1190 * get a use after free.
1193 dlm_midcomms_commit_msg_3_2(mh, name, namelen);
1194 srcu_read_unlock(&nodes_srcu, mh->idx);
1198 srcu_read_unlock(&nodes_srcu, mh->idx);
1205 int dlm_midcomms_start(void)
1207 return dlm_lowcomms_start();
1210 void dlm_midcomms_stop(void)
1212 dlm_lowcomms_stop();
1215 void dlm_midcomms_init(void)
1219 for (i = 0; i < CONN_HASH_SIZE; i++)
1220 INIT_HLIST_HEAD(&node_hash[i]);
1222 dlm_lowcomms_init();
1225 void dlm_midcomms_exit(void)
1227 dlm_lowcomms_exit();
1230 static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
1232 spin_lock(&node->state_lock);
1233 pr_debug("receive active fin ack from node %d with state %s\n",
1234 node->nodeid, dlm_state_str(node->state));
1236 switch (node->state) {
1238 node->state = DLM_FIN_WAIT2;
1239 pr_debug("switch node %d to state %s\n",
1240 node->nodeid, dlm_state_str(node->state));
1243 midcomms_node_reset(node);
1244 pr_debug("switch node %d to state %s\n",
1245 node->nodeid, dlm_state_str(node->state));
1246 wake_up(&node->shutdown_wait);
1249 /* not valid but somehow we got what we want */
1250 wake_up(&node->shutdown_wait);
1253 spin_unlock(&node->state_lock);
1254 log_print("%s: unexpected state: %d\n",
1255 __func__, node->state);
1259 spin_unlock(&node->state_lock);
1262 void dlm_midcomms_add_member(int nodeid)
1264 struct midcomms_node *node;
1267 if (nodeid == dlm_our_nodeid())
1270 idx = srcu_read_lock(&nodes_srcu);
1271 node = nodeid2node(nodeid, GFP_NOFS);
1273 srcu_read_unlock(&nodes_srcu, idx);
1277 spin_lock(&node->state_lock);
1279 pr_debug("receive add member from node %d with state %s\n",
1280 node->nodeid, dlm_state_str(node->state));
1281 switch (node->state) {
1282 case DLM_ESTABLISHED:
1285 node->state = DLM_ESTABLISHED;
1286 pr_debug("switch node %d to state %s\n",
1287 node->nodeid, dlm_state_str(node->state));
1290 /* some invalid state passive shutdown
1291 * was failed, we try to reset and
1292 * hope it will go on.
1294 log_print("reset node %d because shutdown stuck",
1297 midcomms_node_reset(node);
1298 node->state = DLM_ESTABLISHED;
1304 pr_debug("node %d users inc count %d\n", nodeid, node->users);
1305 spin_unlock(&node->state_lock);
1307 srcu_read_unlock(&nodes_srcu, idx);
1310 void dlm_midcomms_remove_member(int nodeid)
1312 struct midcomms_node *node;
1315 if (nodeid == dlm_our_nodeid())
1318 idx = srcu_read_lock(&nodes_srcu);
1319 node = nodeid2node(nodeid, 0);
1321 srcu_read_unlock(&nodes_srcu, idx);
1325 spin_lock(&node->state_lock);
1327 pr_debug("node %d users dec count %d\n", nodeid, node->users);
1329 /* hitting users count to zero means the
1330 * other side is running dlm_midcomms_stop()
1331 * we meet us to have a clean disconnect.
1333 if (node->users == 0) {
1334 pr_debug("receive remove member from node %d with state %s\n",
1335 node->nodeid, dlm_state_str(node->state));
1336 switch (node->state) {
1337 case DLM_ESTABLISHED:
1339 case DLM_CLOSE_WAIT:
1340 /* passive shutdown DLM_LAST_ACK case 2 */
1341 node->state = DLM_LAST_ACK;
1342 pr_debug("switch node %d to state %s case 2\n",
1343 node->nodeid, dlm_state_str(node->state));
1344 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
1345 dlm_send_fin(node, dlm_pas_fin_ack_rcv);
1348 /* probably receive fin caught it, do nothing */
1351 /* already gone, do nothing */
1354 log_print("%s: unexpected state: %d\n",
1355 __func__, node->state);
1359 spin_unlock(&node->state_lock);
1361 srcu_read_unlock(&nodes_srcu, idx);
1364 static void midcomms_node_release(struct rcu_head *rcu)
1366 struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
1368 WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
1369 dlm_send_queue_flush(node);
1373 static void midcomms_shutdown(struct midcomms_node *node)
1377 /* old protocol, we don't wait for pending operations */
1378 switch (node->version) {
1379 case DLM_VERSION_3_2:
1385 spin_lock(&node->state_lock);
1386 pr_debug("receive active shutdown for node %d with state %s\n",
1387 node->nodeid, dlm_state_str(node->state));
1388 switch (node->state) {
1389 case DLM_ESTABLISHED:
1390 node->state = DLM_FIN_WAIT1;
1391 pr_debug("switch node %d to state %s case 2\n",
1392 node->nodeid, dlm_state_str(node->state));
1393 dlm_send_fin(node, dlm_act_fin_ack_rcv);
1396 /* we have what we want */
1397 spin_unlock(&node->state_lock);
1400 /* busy to enter DLM_FIN_WAIT1, wait until passive
1401 * done in shutdown_wait to enter DLM_CLOSED.
1405 spin_unlock(&node->state_lock);
1407 if (DLM_DEBUG_FENCE_TERMINATION)
1410 /* wait for other side dlm + fin */
1411 ret = wait_event_timeout(node->shutdown_wait,
1412 node->state == DLM_CLOSED ||
1413 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
1414 DLM_SHUTDOWN_TIMEOUT);
1415 if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) {
1416 pr_debug("active shutdown timed out for node %d with state %s\n",
1417 node->nodeid, dlm_state_str(node->state));
1418 midcomms_node_reset(node);
1422 pr_debug("active shutdown done for node %d with state %s\n",
1423 node->nodeid, dlm_state_str(node->state));
1426 void dlm_midcomms_shutdown(void)
1428 struct midcomms_node *node;
1431 mutex_lock(&close_lock);
1432 idx = srcu_read_lock(&nodes_srcu);
1433 for (i = 0; i < CONN_HASH_SIZE; i++) {
1434 hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
1435 midcomms_shutdown(node);
1437 dlm_delete_debug_comms_file(node->debugfs);
1439 spin_lock(&nodes_lock);
1440 hlist_del_rcu(&node->hlist);
1441 spin_unlock(&nodes_lock);
1443 call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
1446 srcu_read_unlock(&nodes_srcu, idx);
1447 mutex_unlock(&close_lock);
1449 dlm_lowcomms_shutdown();
1452 int dlm_midcomms_close(int nodeid)
1454 struct midcomms_node *node;
1457 if (nodeid == dlm_our_nodeid())
1460 idx = srcu_read_lock(&nodes_srcu);
1461 /* Abort pending close/remove operation */
1462 node = nodeid2node(nodeid, 0);
1464 /* let shutdown waiters leave */
1465 set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
1466 wake_up(&node->shutdown_wait);
1468 srcu_read_unlock(&nodes_srcu, idx);
1470 synchronize_srcu(&nodes_srcu);
1472 idx = srcu_read_lock(&nodes_srcu);
1473 mutex_lock(&close_lock);
1474 node = nodeid2node(nodeid, 0);
1476 mutex_unlock(&close_lock);
1477 srcu_read_unlock(&nodes_srcu, idx);
1478 return dlm_lowcomms_close(nodeid);
1481 ret = dlm_lowcomms_close(nodeid);
1482 spin_lock(&node->state_lock);
1483 midcomms_node_reset(node);
1484 spin_unlock(&node->state_lock);
1485 srcu_read_unlock(&nodes_srcu, idx);
1486 mutex_unlock(&close_lock);
1491 /* debug functionality to send raw dlm msg from user space */
1492 struct dlm_rawmsg_data {
1493 struct midcomms_node *node;
1497 static void midcomms_new_rawmsg_cb(void *data)
1499 struct dlm_rawmsg_data *rd = data;
1500 struct dlm_header *h = rd->buf;
1502 switch (h->h_version) {
1503 case cpu_to_le32(DLM_VERSION_3_1):
1509 h->u.h_seq = cpu_to_le32(rd->node->seq_send++);
1518 int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
1521 struct dlm_rawmsg_data rd;
1522 struct dlm_msg *msg;
1528 msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS,
1529 &msgbuf, midcomms_new_rawmsg_cb, &rd);
1533 memcpy(msgbuf, buf, buflen);
1534 dlm_lowcomms_commit_msg(msg);