Lines Matching +full:shutdown +full:- +full:ack
1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2021 Red Hat, Inc. All rights reserved.
15 * This is the appallingly named "mid-level" comms layer. It takes care of
29 * Due to the fact that dlm has pre-configured node addresses on every side
33 * compatibility these messages are not covered by the midcomms re-transmission
34 * layer. These messages have their own re-transmission handling in the dlm
43 * like TCP supports it with half-closed socket support. SCTP doesn't support
44 * half-closed sockets, so we do it at the DLM layer. Also socket shutdown() can be
61-87 * [ASCII state diagram from the file-header comment: the DLM termination
      *  handshake, modelled on the TCP connection-close state machine. The
      *  matched lines show the split into FIN WAIT-1 (snd FIN, active close)
      *  and CLOSE WAIT (rcv FIN / snd ACK, passive close); FIN WAIT-1 moving
      *  to FINWAIT-2 on rcv ACK of FIN or to CLOSING on rcv FIN / snd ACK;
      *  CLOSE WAIT moving to LAST-ACK on close / member removal; FINWAIT-2
      *  going to CLOSED on rcv FIN / snd ACK; and CLOSING and LAST-ACK going
      *  to CLOSED on rcv ACK of FIN. The diagram's column alignment is lost
      *  in this line-matching view; see the source file for the complete
      *  figure.]
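The header comment excerpted above describes the midcomms re-transmission scheme: each message sent to a peer gets a sequence number, stays on a per-node send queue until the peer acknowledges a higher sequence, and is resent if the connection has to be re-established. Below is a minimal, standalone sketch of that bookkeeping; it is an illustration only (invented names, a plain singly linked list, no locking or RCU), not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_msg {
	uint32_t seq;			/* sequence assigned when the message is queued */
	bool committed;			/* only committed messages are resend candidates */
	struct pending_msg *next;
};

/* "a is before b" with 32-bit wrap-around, like the before() check at line 456 */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* drop every queued message the peer has acknowledged (seq earlier than acked_seq) */
static void receive_ack(struct pending_msg **queue, uint32_t acked_seq)
{
	while (*queue && seq_before((*queue)->seq, acked_seq)) {
		struct pending_msg *mh = *queue;

		*queue = mh->next;
		free(mh);
	}
}

int main(void)
{
	struct pending_msg *queue = NULL, **tail = &queue;
	uint32_t seq_send = 0;

	/* "send" three messages; each stays queued until the peer acks past it */
	for (int i = 0; i < 3; i++) {
		struct pending_msg *mh = calloc(1, sizeof(*mh));

		if (!mh)
			return 1;
		mh->seq = seq_send++;
		mh->committed = true;
		*tail = mh;
		tail = &mh->next;
	}

	receive_ack(&queue, 2);		/* peer acknowledged everything before seq 2 */

	/* whatever is still queued would be resent after a reconnect */
	for (struct pending_msg *mh = queue; mh; mh = mh->next)
		printf("unacked, resend candidate: seq %u\n", mh->seq);

	return 0;
}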
256 return dlm_state_str(node->state); in dlm_midcomms_state()
261 return node->flags; in dlm_midcomms_flags()
266 return atomic_read(&node->send_queue_cnt); in dlm_midcomms_send_queue_cnt()
271 return node->version; in dlm_midcomms_version()
279 if (node->nodeid == nodeid) in __find_node()
290 dlm_lowcomms_put_msg(mh->msg); in dlm_mhandle_release()
297 list_del_rcu(&mh->list); in dlm_mhandle_delete()
298 atomic_dec(&node->send_queue_cnt); in dlm_mhandle_delete()
299 call_rcu(&mh->rcu, dlm_mhandle_release); in dlm_mhandle_delete()
306 pr_debug("flush midcomms send queue of node %d\n", node->nodeid); in dlm_send_queue_flush()
309 spin_lock_bh(&node->send_queue_lock); in dlm_send_queue_flush()
310 list_for_each_entry_rcu(mh, &node->send_queue, list) { in dlm_send_queue_flush()
313 spin_unlock_bh(&node->send_queue_lock); in dlm_send_queue_flush()
319 pr_debug("reset node %d\n", node->nodeid); in midcomms_node_reset()
321 atomic_set(&node->seq_next, DLM_SEQ_INIT); in midcomms_node_reset()
322 atomic_set(&node->seq_send, DLM_SEQ_INIT); in midcomms_node_reset()
323 atomic_set(&node->ulp_delivered, 0); in midcomms_node_reset()
324 node->version = DLM_VERSION_NOT_SET; in midcomms_node_reset()
325 node->flags = 0; in midcomms_node_reset()
328 node->state = DLM_CLOSED; in midcomms_node_reset()
329 wake_up(&node->shutdown_wait); in midcomms_node_reset()
356 return -ENOMEM; in dlm_midcomms_addr()
358 node->nodeid = nodeid; in dlm_midcomms_addr()
359 spin_lock_init(&node->state_lock); in dlm_midcomms_addr()
360 spin_lock_init(&node->send_queue_lock); in dlm_midcomms_addr()
361 atomic_set(&node->send_queue_cnt, 0); in dlm_midcomms_addr()
362 INIT_LIST_HEAD(&node->send_queue); in dlm_midcomms_addr()
363 init_waitqueue_head(&node->shutdown_wait); in dlm_midcomms_addr()
364 node->users = 0; in dlm_midcomms_addr()
368 hlist_add_head_rcu(&node->hlist, &node_hash[r]); in dlm_midcomms_addr()
371 node->debugfs = dlm_create_debug_comms_file(nodeid, node); in dlm_midcomms_addr()
384 return -ENOMEM; in dlm_send_ack()
388 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in dlm_send_ack()
389 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); in dlm_send_ack()
390 m_header->h_length = cpu_to_le16(mb_len); in dlm_send_ack()
391 m_header->h_cmd = DLM_ACK; in dlm_send_ack()
392 m_header->u.h_seq = cpu_to_le32(seq); in dlm_send_ack()
406 /* let only the one user that trips the threshold send the ack back */ in dlm_send_ack_threshold()
408 oval = atomic_read(&node->ulp_delivered); in dlm_send_ack_threshold()
416 } while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval); in dlm_send_ack_threshold()
419 dlm_send_ack(node->nodeid, atomic_read(&node->seq_next)); in dlm_send_ack_threshold()
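The do/while cmpxchg loop above (file lines 408-416) resets the delivered-message counter so that, when several receive paths race past the threshold, exactly one of them sends the ack. Here is a hypothetical userspace rendering of the same pattern with C11 atomics, purely for illustration (the names and the reset-to-zero policy mirror the fragment; everything else is made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint32_t ulp_delivered;	/* bumped once per message handed to the upper layer */

/* returns true for exactly one of the callers racing past the threshold */
static bool ack_threshold_crossed(uint32_t threshold)
{
	uint32_t oval, nval;
	bool send_ack;

	do {
		oval = atomic_load(&ulp_delivered);
		send_ack = (oval > threshold);
		nval = send_ack ? 0 : oval;	/* reset the counter only on a crossing */
	} while (!atomic_compare_exchange_weak(&ulp_delivered, &oval, nval));

	return send_ack;
}

int main(void)
{
	atomic_fetch_add(&ulp_delivered, 1);		/* one delivered message, as at line 590 */
	return ack_threshold_crossed(0) ? 0 : 1;	/* 1 > 0: this caller would send the ack */
}

Each delivered message bumps the counter (the listing shows atomic_inc(&node->ulp_delivered) at line 590), and only the caller that sees the crossing sends the ack carrying the current seq_next.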
430 mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc); in dlm_send_fin()
432 return -ENOMEM; in dlm_send_fin()
434 set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags); in dlm_send_fin()
435 mh->ack_rcv = ack_rcv; in dlm_send_fin()
439 m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in dlm_send_fin()
440 m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); in dlm_send_fin()
441 m_header->h_length = cpu_to_le16(mb_len); in dlm_send_fin()
442 m_header->h_cmd = DLM_FIN; in dlm_send_fin()
444 pr_debug("sending fin msg to node %d\n", node->nodeid); in dlm_send_fin()
455 list_for_each_entry_rcu(mh, &node->send_queue, list) { in dlm_receive_ack()
456 if (before(mh->seq, seq)) { in dlm_receive_ack()
457 if (mh->ack_rcv) in dlm_receive_ack()
458 mh->ack_rcv(node); in dlm_receive_ack()
465 spin_lock_bh(&node->send_queue_lock); in dlm_receive_ack()
466 list_for_each_entry_rcu(mh, &node->send_queue, list) { in dlm_receive_ack()
467 if (before(mh->seq, seq)) { in dlm_receive_ack()
474 spin_unlock_bh(&node->send_queue_lock); in dlm_receive_ack()
480 spin_lock_bh(&node->state_lock); in dlm_pas_fin_ack_rcv()
481 pr_debug("receive passive fin ack from node %d with state %s\n", in dlm_pas_fin_ack_rcv()
482 node->nodeid, dlm_state_str(node->state)); in dlm_pas_fin_ack_rcv()
484 switch (node->state) { in dlm_pas_fin_ack_rcv()
491 wake_up(&node->shutdown_wait); in dlm_pas_fin_ack_rcv()
494 spin_unlock_bh(&node->state_lock); in dlm_pas_fin_ack_rcv()
496 __func__, node->state); in dlm_pas_fin_ack_rcv()
500 spin_unlock_bh(&node->state_lock); in dlm_pas_fin_ack_rcv()
506 switch (p->header.h_cmd) { in dlm_receive_buffer_3_2_trace()
508 trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message); in dlm_receive_buffer_3_2_trace()
511 trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom); in dlm_receive_buffer_3_2_trace()
526 oval = atomic_read(&node->seq_next); in dlm_midcomms_receive_buffer()
532 } while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval); in dlm_midcomms_receive_buffer()
535 switch (p->header.h_cmd) { in dlm_midcomms_receive_buffer()
537 spin_lock_bh(&node->state_lock); in dlm_midcomms_receive_buffer()
539 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer()
541 switch (node->state) { in dlm_midcomms_receive_buffer()
543 dlm_send_ack(node->nodeid, nval); in dlm_midcomms_receive_buffer()
545 /* passive shutdown DLM_LAST_ACK case 1 in dlm_midcomms_receive_buffer()
549 if (node->users == 0) { in dlm_midcomms_receive_buffer()
550 node->state = DLM_LAST_ACK; in dlm_midcomms_receive_buffer()
552 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer()
553 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); in dlm_midcomms_receive_buffer()
556 node->state = DLM_CLOSE_WAIT; in dlm_midcomms_receive_buffer()
558 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer()
562 dlm_send_ack(node->nodeid, nval); in dlm_midcomms_receive_buffer()
563 node->state = DLM_CLOSING; in dlm_midcomms_receive_buffer()
564 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); in dlm_midcomms_receive_buffer()
566 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer()
569 dlm_send_ack(node->nodeid, nval); in dlm_midcomms_receive_buffer()
572 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer()
578 spin_unlock_bh(&node->state_lock); in dlm_midcomms_receive_buffer()
580 __func__, node->state); in dlm_midcomms_receive_buffer()
584 spin_unlock_bh(&node->state_lock); in dlm_midcomms_receive_buffer()
587 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); in dlm_midcomms_receive_buffer()
589 dlm_receive_buffer(p, node->nodeid); in dlm_midcomms_receive_buffer()
590 atomic_inc(&node->ulp_delivered); in dlm_midcomms_receive_buffer()
591 /* unlikely case to send ack back when we don't transmit */ in dlm_midcomms_receive_buffer()
596 /* retry to ack a message we already have by sending back the in dlm_midcomms_receive_buffer()
597 * current node->seq_next number as the ack. in dlm_midcomms_receive_buffer()
600 dlm_send_ack(node->nodeid, oval); in dlm_midcomms_receive_buffer()
603 seq, oval, node->nodeid); in dlm_midcomms_receive_buffer()
616 return -1; in dlm_opts_check_msglen()
617 len -= sizeof(struct dlm_opts); in dlm_opts_check_msglen()
619 if (len < le16_to_cpu(p->opts.o_optlen)) in dlm_opts_check_msglen()
620 return -1; in dlm_opts_check_msglen()
621 len -= le16_to_cpu(p->opts.o_optlen); in dlm_opts_check_msglen()
623 switch (p->opts.o_nextcmd) { in dlm_opts_check_msglen()
628 return -1; in dlm_opts_check_msglen()
636 return -1; in dlm_opts_check_msglen()
644 return -1; in dlm_opts_check_msglen()
650 p->opts.o_nextcmd, nodeid); in dlm_opts_check_msglen()
651 return -1; in dlm_opts_check_msglen()
659 uint16_t msglen = le16_to_cpu(p->header.h_length); in dlm_midcomms_receive_buffer_3_2()
669 switch (node->version) { in dlm_midcomms_receive_buffer_3_2()
671 node->version = DLM_VERSION_3_2; in dlm_midcomms_receive_buffer_3_2()
672 wake_up(&node->shutdown_wait); in dlm_midcomms_receive_buffer_3_2()
674 node->nodeid); in dlm_midcomms_receive_buffer_3_2()
676 spin_lock(&node->state_lock); in dlm_midcomms_receive_buffer_3_2()
677 switch (node->state) { in dlm_midcomms_receive_buffer_3_2()
679 node->state = DLM_ESTABLISHED; in dlm_midcomms_receive_buffer_3_2()
681 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_receive_buffer_3_2()
686 spin_unlock(&node->state_lock); in dlm_midcomms_receive_buffer_3_2()
693 DLM_VERSION_3_2, node->nodeid, node->version); in dlm_midcomms_receive_buffer_3_2()
697 switch (p->header.h_cmd) { in dlm_midcomms_receive_buffer_3_2()
705 switch (p->rcom.rc_type) { in dlm_midcomms_receive_buffer_3_2()
716 le32_to_cpu(p->rcom.rc_type), nodeid); in dlm_midcomms_receive_buffer_3_2()
720 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); in dlm_midcomms_receive_buffer_3_2()
724 seq = le32_to_cpu(p->header.u.h_seq); in dlm_midcomms_receive_buffer_3_2()
733 p = (union dlm_packet *)((unsigned char *)p->opts.o_opts + in dlm_midcomms_receive_buffer_3_2()
734 le16_to_cpu(p->opts.o_optlen)); in dlm_midcomms_receive_buffer_3_2()
737 msglen = le16_to_cpu(p->header.h_length); in dlm_midcomms_receive_buffer_3_2()
738 switch (p->header.h_cmd) { in dlm_midcomms_receive_buffer_3_2()
772 seq = le32_to_cpu(p->header.u.h_seq); in dlm_midcomms_receive_buffer_3_2()
777 p->header.h_cmd, nodeid); in dlm_midcomms_receive_buffer_3_2()
787 uint16_t msglen = le16_to_cpu(p->header.h_length); in dlm_midcomms_receive_buffer_3_1()
798 switch (node->version) { in dlm_midcomms_receive_buffer_3_1()
800 node->version = DLM_VERSION_3_1; in dlm_midcomms_receive_buffer_3_1()
801 wake_up(&node->shutdown_wait); in dlm_midcomms_receive_buffer_3_1()
803 node->nodeid); in dlm_midcomms_receive_buffer_3_1()
809 DLM_VERSION_3_1, node->nodeid, node->version); in dlm_midcomms_receive_buffer_3_1()
815 switch (p->header.h_cmd) { in dlm_midcomms_receive_buffer_3_1()
829 p->header.h_cmd, nodeid); in dlm_midcomms_receive_buffer_3_1()
857 msglen = le16_to_cpu(hd->h_length); in dlm_validate_incoming_buffer()
862 return -EBADMSG; in dlm_validate_incoming_buffer()
872 len -= msglen; in dlm_validate_incoming_buffer()
880 * Called from the low-level comms layer to process a buffer of
893 msglen = le16_to_cpu(hd->h_length); in dlm_process_incoming_buffer()
897 switch (hd->h_version) { in dlm_process_incoming_buffer()
906 le32_to_cpu(hd->h_version), nodeid); in dlm_process_incoming_buffer()
911 len -= msglen; in dlm_process_incoming_buffer()
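dlm_validate_incoming_buffer() and dlm_process_incoming_buffer() above both walk a receive buffer that can hold several back-to-back messages, each header announcing its own length, and stop as soon as less than one complete message remains. A simplified, self-contained sketch of that walk (the struct, the helper name and the example payload are invented; the kernel additionally validates the header fields and converts h_length with le16_to_cpu()):

#include <stdint.h>
#include <string.h>

struct hdr {
	uint16_t h_length;		/* length of this message, header included */
};

/* returns bytes consumed; anything left over is an incomplete trailing message */
static int process_buffer(const unsigned char *buf, int len)
{
	int ret = 0;

	while (len >= (int)sizeof(struct hdr)) {
		struct hdr hd;
		uint16_t msglen;

		memcpy(&hd, buf, sizeof(hd));	/* the header may be unaligned */
		msglen = hd.h_length;		/* le16_to_cpu() in the kernel */

		if (msglen < sizeof(struct hdr) || msglen > len)
			break;			/* wait for the rest of this message */

		/* hand buf[0..msglen) to the per-version receive path here */

		buf += msglen;
		len -= msglen;
		ret += msglen;
	}

	return ret;
}

int main(void)
{
	unsigned char buf[16];
	struct hdr hd = { .h_length = 8 };

	memcpy(buf, &hd, sizeof(hd));		/* message 1: 8 bytes */
	hd.h_length = 6;
	memcpy(buf + 8, &hd, sizeof(hd));	/* message 2: 6 bytes */

	/* 14 bytes hold two complete messages; trailing bytes would be kept back */
	return process_buffer(buf, 14) == 14 ? 0 : 1;
}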
932 switch (node->version) { in dlm_midcomms_unack_msg_resend()
941 list_for_each_entry_rcu(mh, &node->send_queue, list) { in dlm_midcomms_unack_msg_resend()
942 if (!mh->committed) in dlm_midcomms_unack_msg_resend()
945 ret = dlm_lowcomms_resend_msg(mh->msg); in dlm_midcomms_unack_msg_resend()
948 mh->seq, node->nodeid); in dlm_midcomms_unack_msg_resend()
957 opts->o_header.h_cmd = DLM_OPTS; in dlm_fill_opts_header()
958 opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); in dlm_fill_opts_header()
959 opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); in dlm_fill_opts_header()
960 opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len); in dlm_fill_opts_header()
961 opts->o_header.u.h_seq = cpu_to_le32(seq); in dlm_fill_opts_header()
968 atomic_inc(&mh->node->send_queue_cnt); in midcomms_new_msg_cb()
970 spin_lock_bh(&mh->node->send_queue_lock); in midcomms_new_msg_cb()
971 list_add_tail_rcu(&mh->list, &mh->node->send_queue); in midcomms_new_msg_cb()
972 spin_unlock_bh(&mh->node->send_queue_lock); in midcomms_new_msg_cb()
974 mh->seq = atomic_fetch_inc(&mh->node->seq_send); in midcomms_new_msg_cb()
989 mh->opts = opts; in dlm_midcomms_get_msg_3_2()
992 dlm_fill_opts_header(opts, len, mh->seq); in dlm_midcomms_get_msg_3_2()
995 mh->inner_p = (const union dlm_packet *)*ppc; in dlm_midcomms_get_msg_3_2()
1016 WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags)); in dlm_midcomms_get_mhandle()
1022 mh->committed = false; in dlm_midcomms_get_mhandle()
1023 mh->ack_rcv = NULL; in dlm_midcomms_get_mhandle()
1024 mh->idx = idx; in dlm_midcomms_get_mhandle()
1025 mh->node = node; in dlm_midcomms_get_mhandle()
1027 switch (node->version) { in dlm_midcomms_get_mhandle()
1037 /* send ack back if necessary */ in dlm_midcomms_get_mhandle()
1052 mh->msg = msg; in dlm_midcomms_get_mhandle()
1056 * nodes_srcu using mh->idx which is assumed in dlm_midcomms_get_mhandle()
1070 switch (mh->inner_p->header.h_cmd) { in dlm_midcomms_commit_msg_3_2_trace()
1072 trace_dlm_send_message(mh->node->nodeid, mh->seq, in dlm_midcomms_commit_msg_3_2_trace()
1073 &mh->inner_p->message, in dlm_midcomms_commit_msg_3_2_trace()
1077 trace_dlm_send_rcom(mh->node->nodeid, mh->seq, in dlm_midcomms_commit_msg_3_2_trace()
1078 &mh->inner_p->rcom); in dlm_midcomms_commit_msg_3_2_trace()
1090 mh->opts->o_nextcmd = mh->inner_p->header.h_cmd; in dlm_midcomms_commit_msg_3_2()
1091 mh->committed = true; in dlm_midcomms_commit_msg_3_2()
1093 dlm_lowcomms_commit_msg(mh->msg); in dlm_midcomms_commit_msg_3_2()
1104 switch (mh->node->version) { in dlm_midcomms_commit_mhandle()
1106 srcu_read_unlock(&nodes_srcu, mh->idx); in dlm_midcomms_commit_mhandle()
1108 dlm_lowcomms_commit_msg(mh->msg); in dlm_midcomms_commit_mhandle()
1109 dlm_lowcomms_put_msg(mh->msg); in dlm_midcomms_commit_mhandle()
1116 * an ack back which releases the mhandle and we in dlm_midcomms_commit_mhandle()
1121 srcu_read_unlock(&nodes_srcu, mh->idx); in dlm_midcomms_commit_mhandle()
1125 srcu_read_unlock(&nodes_srcu, mh->idx); in dlm_midcomms_commit_mhandle()
1156 WARN_ON_ONCE(atomic_read(&node->send_queue_cnt)); in midcomms_node_release()
1169 dlm_delete_debug_comms_file(node->debugfs); in dlm_midcomms_exit()
1172 hlist_del_rcu(&node->hlist); in dlm_midcomms_exit()
1175 call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release); in dlm_midcomms_exit()
1185 spin_lock_bh(&node->state_lock); in dlm_act_fin_ack_rcv()
1186 pr_debug("receive active fin ack from node %d with state %s\n", in dlm_act_fin_ack_rcv()
1187 node->nodeid, dlm_state_str(node->state)); in dlm_act_fin_ack_rcv()
1189 switch (node->state) { in dlm_act_fin_ack_rcv()
1191 node->state = DLM_FIN_WAIT2; in dlm_act_fin_ack_rcv()
1193 node->nodeid, dlm_state_str(node->state)); in dlm_act_fin_ack_rcv()
1198 node->nodeid, dlm_state_str(node->state)); in dlm_act_fin_ack_rcv()
1202 wake_up(&node->shutdown_wait); in dlm_act_fin_ack_rcv()
1205 spin_unlock_bh(&node->state_lock); in dlm_act_fin_ack_rcv()
1207 __func__, node->state); in dlm_act_fin_ack_rcv()
1211 spin_unlock_bh(&node->state_lock); in dlm_act_fin_ack_rcv()
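dlm_pas_fin_ack_rcv() and dlm_act_fin_ack_rcv() above apply the header-comment diagram's transitions when the peer acknowledges a FIN. As a compact illustration only (the kernel keeps these in two separate handlers under node->state_lock and resets the node rather than just flipping an enum), the effect of receiving an ACK for our FIN could be sketched as:

enum node_state { DLM_ESTABLISHED, DLM_FIN_WAIT1, DLM_FIN_WAIT2,
		  DLM_CLOSING, DLM_LAST_ACK, DLM_CLOSED };

/* state after the peer acknowledged our FIN, per the header-comment diagram */
enum node_state fin_acked(enum node_state s)
{
	switch (s) {
	case DLM_FIN_WAIT1:
		return DLM_FIN_WAIT2;	/* our FIN acked, wait for the peer's FIN */
	case DLM_CLOSING:
	case DLM_LAST_ACK:
		return DLM_CLOSED;	/* both directions shut down */
	default:
		return s;		/* other states are treated as errors in the kernel handlers */
	}
}

int main(void)
{
	return fin_acked(DLM_FIN_WAIT1) == DLM_FIN_WAIT2 ? 0 : 1;
}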
1226 spin_lock_bh(&node->state_lock); in dlm_midcomms_add_member()
1227 if (!node->users) { in dlm_midcomms_add_member()
1229 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_add_member()
1230 switch (node->state) { in dlm_midcomms_add_member()
1234 node->state = DLM_ESTABLISHED; in dlm_midcomms_add_member()
1236 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_add_member()
1239 /* some invalid state passive shutdown in dlm_midcomms_add_member()
1243 log_print("reset node %d because shutdown stuck", in dlm_midcomms_add_member()
1244 node->nodeid); in dlm_midcomms_add_member()
1247 node->state = DLM_ESTABLISHED; in dlm_midcomms_add_member()
1252 node->users++; in dlm_midcomms_add_member()
1253 pr_debug("node %d users inc count %d\n", nodeid, node->users); in dlm_midcomms_add_member()
1254 spin_unlock_bh(&node->state_lock); in dlm_midcomms_add_member()
1272 spin_lock_bh(&node->state_lock); in dlm_midcomms_remove_member()
1277 if (!node->users) { in dlm_midcomms_remove_member()
1278 spin_unlock_bh(&node->state_lock); in dlm_midcomms_remove_member()
1283 node->users--; in dlm_midcomms_remove_member()
1284 pr_debug("node %d users dec count %d\n", nodeid, node->users); in dlm_midcomms_remove_member()
1290 if (node->users == 0) { in dlm_midcomms_remove_member()
1292 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_remove_member()
1293 switch (node->state) { in dlm_midcomms_remove_member()
1297 /* passive shutdown DLM_LAST_ACK case 2 */ in dlm_midcomms_remove_member()
1298 node->state = DLM_LAST_ACK; in dlm_midcomms_remove_member()
1300 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_remove_member()
1301 set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); in dlm_midcomms_remove_member()
1312 __func__, node->state); in dlm_midcomms_remove_member()
1316 spin_unlock_bh(&node->state_lock); in dlm_midcomms_remove_member()
1329 ret = wait_event_timeout(node->shutdown_wait, in dlm_midcomms_version_wait()
1330 node->version != DLM_VERSION_NOT_SET || in dlm_midcomms_version_wait()
1331 node->state == DLM_CLOSED || in dlm_midcomms_version_wait()
1332 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags), in dlm_midcomms_version_wait()
1334 if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) in dlm_midcomms_version_wait()
1336 node->nodeid, dlm_state_str(node->state)); in dlm_midcomms_version_wait()
1347 switch (node->version) { in midcomms_shutdown()
1354 spin_lock_bh(&node->state_lock); in midcomms_shutdown()
1355 pr_debug("receive active shutdown for node %d with state %s\n", in midcomms_shutdown()
1356 node->nodeid, dlm_state_str(node->state)); in midcomms_shutdown()
1357 switch (node->state) { in midcomms_shutdown()
1359 node->state = DLM_FIN_WAIT1; in midcomms_shutdown()
1361 node->nodeid, dlm_state_str(node->state)); in midcomms_shutdown()
1373 spin_unlock_bh(&node->state_lock); in midcomms_shutdown()
1379 ret = wait_event_timeout(node->shutdown_wait, in midcomms_shutdown()
1380 node->state == DLM_CLOSED || in midcomms_shutdown()
1381 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags), in midcomms_shutdown()
1384 pr_debug("active shutdown timed out for node %d with state %s\n", in midcomms_shutdown()
1385 node->nodeid, dlm_state_str(node->state)); in midcomms_shutdown()
1387 pr_debug("active shutdown done for node %d with state %s\n", in midcomms_shutdown()
1388 node->nodeid, dlm_state_str(node->state)); in midcomms_shutdown()
1424 /* let shutdown waiters leave */ in dlm_midcomms_close()
1425 set_bit(DLM_NODE_FLAG_CLOSE, &node->flags); in dlm_midcomms_close()
1426 wake_up(&node->shutdown_wait); in dlm_midcomms_close()
1442 dlm_delete_debug_comms_file(node->debugfs); in dlm_midcomms_close()
1445 hlist_del_rcu(&node->hlist); in dlm_midcomms_close()
1457 call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release); in dlm_midcomms_close()
1472 struct dlm_header *h = rd->buf; in midcomms_new_rawmsg_cb()
1474 switch (h->h_version) { in midcomms_new_rawmsg_cb()
1478 switch (h->h_cmd) { in midcomms_new_rawmsg_cb()
1480 if (!h->u.h_seq) in midcomms_new_rawmsg_cb()
1481 h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send)); in midcomms_new_rawmsg_cb()
1500 msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf, in dlm_midcomms_rawmsg_send()
1503 return -ENOMEM; in dlm_midcomms_rawmsg_send()