Lines Matching +full:mode +full:- +full:recovery

1 // SPDX-License-Identifier: GPL-2.0-or-later
13 #include "md-bitmap.h"
14 #include "md-cluster.h"
27 void (*bast)(void *arg, int mode); /* blocking AST function pointer */
29 int mode; member
124 res->sync_locking_done = true; in sync_ast()
125 wake_up(&res->sync_locking); in sync_ast()
128 static int dlm_lock_sync(struct dlm_lock_resource *res, int mode) in dlm_lock_sync() argument
132 ret = dlm_lock(res->ls, mode, &res->lksb, in dlm_lock_sync()
133 res->flags, res->name, strlen(res->name), in dlm_lock_sync()
134 0, sync_ast, res, res->bast); in dlm_lock_sync()
137 ret = wait_event_timeout(res->sync_locking, res->sync_locking_done, in dlm_lock_sync()
139 res->sync_locking_done = false; in dlm_lock_sync()
141 pr_err("locking DLM '%s' timeout!\n", res->name); in dlm_lock_sync()
142 return -EBUSY; in dlm_lock_sync()
144 if (res->lksb.sb_status == 0) in dlm_lock_sync()
145 res->mode = mode; in dlm_lock_sync()
146 return res->lksb.sb_status; in dlm_lock_sync()
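
Pieced together from the fragments above, the synchronous-lock helper follows a common DLM pattern: issue the asynchronous dlm_lock() with sync_ast() as the completion AST, then sleep on the resource's waitqueue until the AST fires. A rough sketch, not the verbatim source; LOCK_TIMEOUT is a placeholder name for the timeout constant that the listing elides:

static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
        int ret;

        /* queue the request; sync_ast() sets sync_locking_done and wakes us */
        ret = dlm_lock(res->ls, mode, &res->lksb,
                       res->flags, res->name, strlen(res->name),
                       0, sync_ast, res, res->bast);
        if (ret)
                return ret;

        /* LOCK_TIMEOUT is a placeholder; the real constant is not in the excerpt */
        ret = wait_event_timeout(res->sync_locking, res->sync_locking_done,
                                 msecs_to_jiffies(LOCK_TIMEOUT));
        res->sync_locking_done = false;
        if (!ret) {
                pr_err("locking DLM '%s' timeout!\n", res->name);
                return -EBUSY;
        }

        if (res->lksb.sb_status == 0)
                res->mode = mode;       /* remember the granted mode */
        return res->lksb.sb_status;     /* 0 on success, DLM error otherwise */
}
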
158 static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode, in dlm_lock_sync_interruptible() argument
163 ret = dlm_lock(res->ls, mode, &res->lksb, in dlm_lock_sync_interruptible()
164 res->flags, res->name, strlen(res->name), in dlm_lock_sync_interruptible()
165 0, sync_ast, res, res->bast); in dlm_lock_sync_interruptible()
169 wait_event(res->sync_locking, res->sync_locking_done in dlm_lock_sync_interruptible()
171 || test_bit(MD_CLOSING, &mddev->flags)); in dlm_lock_sync_interruptible()
172 if (!res->sync_locking_done) { in dlm_lock_sync_interruptible()
178 ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL, in dlm_lock_sync_interruptible()
179 &res->lksb, res); in dlm_lock_sync_interruptible()
180 res->sync_locking_done = false; in dlm_lock_sync_interruptible()
183 "%s return %d\n", res->name, ret); in dlm_lock_sync_interruptible()
184 return -EPERM; in dlm_lock_sync_interruptible()
186 res->sync_locking_done = false; in dlm_lock_sync_interruptible()
187 if (res->lksb.sb_status == 0) in dlm_lock_sync_interruptible()
188 res->mode = mode; in dlm_lock_sync_interruptible()
189 return res->lksb.sb_status; in dlm_lock_sync_interruptible()
193 char *name, void (*bastfn)(void *arg, int mode), int with_lvb) in lockres_init() argument
197 struct md_cluster_info *cinfo = mddev->cluster_info; in lockres_init()
202 init_waitqueue_head(&res->sync_locking); in lockres_init()
203 res->sync_locking_done = false; in lockres_init()
204 res->ls = cinfo->lockspace; in lockres_init()
205 res->mddev = mddev; in lockres_init()
206 res->mode = DLM_LOCK_IV; in lockres_init()
208 res->name = kzalloc(namelen + 1, GFP_KERNEL); in lockres_init()
209 if (!res->name) { in lockres_init()
210 pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name); in lockres_init()
213 strscpy(res->name, name, namelen + 1); in lockres_init()
215 res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL); in lockres_init()
216 if (!res->lksb.sb_lvbptr) { in lockres_init()
217 pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name); in lockres_init()
220 res->flags = DLM_LKF_VALBLK; in lockres_init()
224 res->bast = bastfn; in lockres_init()
226 res->flags |= DLM_LKF_EXPEDITE; in lockres_init()
230 pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name); in lockres_init()
233 res->flags &= ~DLM_LKF_EXPEDITE; in lockres_init()
234 res->flags |= DLM_LKF_CONVERT; in lockres_init()
238 kfree(res->lksb.sb_lvbptr); in lockres_init()
239 kfree(res->name); in lockres_init()
255 ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK, in lockres_free()
256 &res->lksb, res); in lockres_free()
258 pr_err("failed to unlock %s return %d\n", res->name, ret); in lockres_free()
260 wait_event(res->sync_locking, res->sync_locking_done); in lockres_free()
262 kfree(res->name); in lockres_free()
263 kfree(res->lksb.sb_lvbptr); in lockres_free()
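
Typical use of the pair above, as seen later in join() and leave(): every named resource is created once with lockres_init() and torn down with lockres_free(). A minimal sketch with error handling trimmed, using names that appear elsewhere in this listing:

        /* create a lock resource named "token" with no LVB and no blocking AST */
        cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
        if (!cinfo->token_lockres)
                return -ENOMEM;

        /* convert it to EX whenever exclusive access is needed ... */
        ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);

        /* ... and on teardown force-unlock it, then free its name and LVB */
        lockres_free(cinfo->token_lockres);
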
272 ri = (struct resync_info *)lockres->lksb.sb_lvbptr; in add_resync_info()
273 ri->lo = cpu_to_le64(lo); in add_resync_info()
274 ri->hi = cpu_to_le64(hi); in add_resync_info()
281 struct md_cluster_info *cinfo = mddev->cluster_info; in read_resync_info()
285 memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info)); in read_resync_info()
287 cinfo->suspend_hi = le64_to_cpu(ri.hi); in read_resync_info()
288 cinfo->suspend_lo = le64_to_cpu(ri.lo); in read_resync_info()
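
The LVB payload exchanged by these two helpers is just a pair of little-endian 64-bit sector bounds; it has to fit inside LVB_SIZE. Reconstructed from the accessors above (field names follow the excerpt):

struct resync_info {
        __le64 lo;      /* start of the range being resynced */
        __le64 hi;      /* end of the range being resynced */
};
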
297 struct mddev *mddev = thread->mddev; in recover_bitmaps()
298 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_bitmaps()
304 while (cinfo->recovery_map) { in recover_bitmaps()
305 slot = fls64((u64)cinfo->recovery_map) - 1; in recover_bitmaps()
310 pr_err("md-cluster: Cannot initialize bitmaps\n"); in recover_bitmaps()
316 pr_err("md-cluster: Could not DLM lock %s: %d\n", in recover_bitmaps()
320 ret = mddev->bitmap_ops->copy_from_slot(mddev, slot, &lo, &hi, true); in recover_bitmaps()
322 pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); in recover_bitmaps()
327 spin_lock_irq(&cinfo->suspend_lock); in recover_bitmaps()
328 cinfo->suspend_hi = 0; in recover_bitmaps()
329 cinfo->suspend_lo = 0; in recover_bitmaps()
330 cinfo->suspend_from = -1; in recover_bitmaps()
331 spin_unlock_irq(&cinfo->suspend_lock); in recover_bitmaps()
334 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in recover_bitmaps()
335 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in recover_bitmaps()
336 mddev->reshape_position != MaxSector) in recover_bitmaps()
337 md_wakeup_thread(mddev->sync_thread); in recover_bitmaps()
340 if (lo < mddev->recovery_cp) in recover_bitmaps()
341 mddev->recovery_cp = lo; in recover_bitmaps()
344 if (mddev->recovery_cp != MaxSector) { in recover_bitmaps()
350 &mddev->recovery); in recover_bitmaps()
351 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in recover_bitmaps()
352 md_wakeup_thread(mddev->thread); in recover_bitmaps()
357 clear_bit(slot, &cinfo->recovery_map); in recover_bitmaps()
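
The recovery loop drains cinfo->recovery_map one slot at a time, highest bit first. Filling the gaps between the excerpted lines, the per-slot work is roughly the following sketch (declarations and error messages trimmed; the bitmap%04d naming is the one used in join() further down):

        while (cinfo->recovery_map) {
                slot = fls64((u64)cinfo->recovery_map) - 1;     /* highest pending slot */

                /* grab the failed node's bitmap lock resource, e.g. "bitmap0002" */
                snprintf(str, 64, "bitmap%04d", slot);
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres)
                        goto clear_bit;
                if (dlm_lock_sync(bm_lockres, DLM_LOCK_PW))
                        goto clear_bit;

                /* merge the dead node's dirty bits into our own bitmap */
                ret = mddev->bitmap_ops->copy_from_slot(mddev, slot, &lo, &hi, true);

                /* ... clear the suspend window, lower recovery_cp if needed ... */
clear_bit:
                lockres_free(bm_lockres);       /* tolerates NULL */
                clear_bit(slot, &cinfo->recovery_map);
        }
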
364 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_prep()
365 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); in recover_prep()
370 struct md_cluster_info *cinfo = mddev->cluster_info; in __recover_slot()
372 set_bit(slot, &cinfo->recovery_map); in __recover_slot()
373 if (!cinfo->recovery_thread) { in __recover_slot()
374 rcu_assign_pointer(cinfo->recovery_thread, in __recover_slot()
376 if (!cinfo->recovery_thread) { in __recover_slot()
377 pr_warn("md-cluster: Could not create recovery thread\n"); in __recover_slot()
381 md_wakeup_thread(cinfo->recovery_thread); in __recover_slot()
387 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_slot()
389 pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n", in recover_slot()
390 mddev->bitmap_info.cluster_name, in recover_slot()
391 slot->nodeid, slot->slot, in recover_slot()
392 cinfo->slot_number); in recover_slot()
394 * cluster-md begins with 0 */ in recover_slot()
395 __recover_slot(mddev, slot->slot - 1); in recover_slot()
403 struct md_cluster_info *cinfo = mddev->cluster_info; in recover_done()
405 cinfo->slot_number = our_slot; in recover_done()
408 if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) { in recover_done()
409 complete(&cinfo->completion); in recover_done()
410 clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state); in recover_done()
412 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); in recover_done()
415 /* these ops are called when a node joins the cluster and the DLM does lock recovery
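
These three callbacks are handed to the DLM when the lockspace is created (see the dlm_new_lockspace() call in join() further down). The registration is a plain ops table; the name md_ls_ops is how the upstream file names it, quoted from memory here, so treat it as an assumption:

static const struct dlm_lockspace_ops md_ls_ops = {
        .recover_prep = recover_prep,   /* pause read balancing while recovery runs */
        .recover_slot = recover_slot,   /* a peer went down: queue its bitmap recovery */
        .recover_done = recover_done,   /* recovery finished: record our slot number */
};
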
428 static void ack_bast(void *arg, int mode) in ack_bast() argument
431 struct md_cluster_info *cinfo = res->mddev->cluster_info; in ack_bast()
433 if (mode == DLM_LOCK_EX) { in ack_bast()
434 if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state)) in ack_bast()
435 md_wakeup_thread(cinfo->recv_thread); in ack_bast()
437 set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state); in ack_bast()
443 struct md_cluster_info *cinfo = mddev->cluster_info; in remove_suspend_info()
444 mddev->pers->quiesce(mddev, 1); in remove_suspend_info()
445 spin_lock_irq(&cinfo->suspend_lock); in remove_suspend_info()
446 cinfo->suspend_hi = 0; in remove_suspend_info()
447 cinfo->suspend_lo = 0; in remove_suspend_info()
448 spin_unlock_irq(&cinfo->suspend_lock); in remove_suspend_info()
449 mddev->pers->quiesce(mddev, 0); in remove_suspend_info()
455 struct md_cluster_info *cinfo = mddev->cluster_info; in process_suspend_info()
461 * clear the REMOTE flag since resync or recovery is finished in process_suspend_info()
464 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in process_suspend_info()
466 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in process_suspend_info()
467 clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); in process_suspend_info()
468 md_wakeup_thread(mddev->thread); in process_suspend_info()
473 if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { in process_suspend_info()
474 sb = page_address(rdev->sb_page); in process_suspend_info()
499 if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) in process_suspend_info()
500 mddev->bitmap_ops->sync_with_cluster(mddev, cinfo->sync_low, in process_suspend_info()
501 cinfo->sync_hi, lo, hi); in process_suspend_info()
502 cinfo->sync_low = lo; in process_suspend_info()
503 cinfo->sync_hi = hi; in process_suspend_info()
505 mddev->pers->quiesce(mddev, 1); in process_suspend_info()
506 spin_lock_irq(&cinfo->suspend_lock); in process_suspend_info()
507 cinfo->suspend_from = slot; in process_suspend_info()
508 cinfo->suspend_lo = lo; in process_suspend_info()
509 cinfo->suspend_hi = hi; in process_suspend_info()
510 spin_unlock_irq(&cinfo->suspend_lock); in process_suspend_info()
511 mddev->pers->quiesce(mddev, 0); in process_suspend_info()
517 struct md_cluster_info *cinfo = mddev->cluster_info; in process_add_new_disk()
525 sprintf(disk_uuid + len, "%pU", cmsg->uuid); in process_add_new_disk()
526 snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot)); in process_add_new_disk()
528 init_completion(&cinfo->newdisk_completion); in process_add_new_disk()
529 set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); in process_add_new_disk()
530 kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp); in process_add_new_disk()
531 if (!wait_for_completion_timeout(&cinfo->newdisk_completion, in process_add_new_disk()
533 pr_err("md-cluster(%s:%d): timeout on a new disk adding\n", in process_add_new_disk()
535 res = -1; in process_add_new_disk()
537 clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state); in process_add_new_disk()
538 set_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); in process_add_new_disk()
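
The uevent fired here carries the new device's UUID and raid slot so that userspace (mdadm) can validate the disk and acknowledge it via new_disk_ack(). A sketch of how the environment strings are assembled; the "EVENT=ADD_DEVICE" key is quoted from memory of the upstream file, so treat it as an assumption:

        char disk_uuid[64];
        char raid_slot[16];
        /* exact EVENT key name is an assumption, not shown in the excerpt */
        char *envp[] = { "EVENT=ADD_DEVICE", disk_uuid, raid_slot, NULL };
        int len;

        len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
        sprintf(disk_uuid + len, "%pU", cmsg->uuid);
        snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));

        kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
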
547 struct md_cluster_info *cinfo = mddev->cluster_info; in process_metadata_update()
548 mddev->good_device_nr = le32_to_cpu(msg->raid_slot); in process_metadata_update()
550 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); in process_metadata_update()
553 thread = rcu_dereference_protected(mddev->thread, true); in process_metadata_update()
554 wait_event(thread->wqueue, in process_metadata_update()
556 test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state)); in process_metadata_update()
557 md_reload_sb(mddev, mddev->good_device_nr); in process_metadata_update()
567 rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot)); in process_remove_disk()
569 set_bit(ClusterRemove, &rdev->flags); in process_remove_disk()
570 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in process_remove_disk()
571 md_wakeup_thread(mddev->thread); in process_remove_disk()
575 __func__, __LINE__, le32_to_cpu(msg->raid_slot)); in process_remove_disk()
584 rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot)); in process_readd_disk()
585 if (rdev && test_bit(Faulty, &rdev->flags)) in process_readd_disk()
586 clear_bit(Faulty, &rdev->flags); in process_readd_disk()
589 __func__, __LINE__, le32_to_cpu(msg->raid_slot)); in process_readd_disk()
597 if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot), in process_recvd_msg()
598 "node %d received its own msg\n", le32_to_cpu(msg->slot))) in process_recvd_msg()
599 return -1; in process_recvd_msg()
600 switch (le32_to_cpu(msg->type)) { in process_recvd_msg()
605 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in process_recvd_msg()
608 clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &mddev->cluster_info->state); in process_recvd_msg()
611 set_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in process_recvd_msg()
612 process_suspend_info(mddev, le32_to_cpu(msg->slot), in process_recvd_msg()
613 le64_to_cpu(msg->low), in process_recvd_msg()
614 le64_to_cpu(msg->high)); in process_recvd_msg()
618 ret = -1; in process_recvd_msg()
627 __recover_slot(mddev, le32_to_cpu(msg->slot)); in process_recvd_msg()
630 if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0)) in process_recvd_msg()
631 ret = mddev->bitmap_ops->resize(mddev, in process_recvd_msg()
632 le64_to_cpu(msg->high), in process_recvd_msg()
636 ret = -1; in process_recvd_msg()
638 __func__, __LINE__, msg->slot); in process_recvd_msg()
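
All of the msg->type / slot / low / high / uuid / raid_slot accesses above operate on one on-wire structure stored in the MESSAGE lock's LVB. Reconstructed from those accesses; the field names match the excerpt, but the message-type names are quoted from memory and should be checked against the top of the file:

enum msg_type {
        METADATA_UPDATED = 0,   /* superblock changed, reload it */
        RESYNCING,              /* sender is resyncing the given range */
        NEWDISK,                /* a new disk is being added, please ack */
        REMOVE,                 /* remove the disk in raid_slot */
        RE_ADD,                 /* re-add a previously faulty disk */
        BITMAP_NEEDS_SYNC,      /* sender died mid-resync, take over */
        CHANGE_CAPACITY,        /* array size changed, update capacity */
        BITMAP_RESIZE,          /* resize the cluster bitmaps */
};

struct cluster_msg {
        __le32 type;            /* one of enum msg_type */
        __le32 slot;            /* sender's slot number */
        __le64 low;             /* range start (meaning depends on type) */
        __le64 high;            /* range end / new size */
        char uuid[16];          /* device UUID for NEWDISK */
        __le32 raid_slot;       /* rdev->desc_nr for disk operations */
};
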
648 struct md_cluster_info *cinfo = thread->mddev->cluster_info; in recv_daemon()
649 struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres; in recv_daemon()
650 struct dlm_lock_resource *message_lockres = cinfo->message_lockres; in recv_daemon()
654 mutex_lock(&cinfo->recv_mutex); in recv_daemon()
658 mutex_unlock(&cinfo->recv_mutex); in recv_daemon()
663 memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg)); in recv_daemon()
664 ret = process_recvd_msg(thread->mddev, &msg); in recv_daemon()
672 /* up-convert to PR on message_lockres */ in recv_daemon()
685 mutex_unlock(&cinfo->recv_mutex); in recv_daemon()
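
The receive path mirrors the send protocol below: under recv_mutex the daemon takes CR on MESSAGE, copies the LVB into a struct cluster_msg, processes it, then acknowledges by dropping its CR on ACK, converting MESSAGE up to PR, re-taking CR on ACK, and finally releasing MESSAGE. A condensed sketch of that ordering as I read the flow, with error handling dropped:

        mutex_lock(&cinfo->recv_mutex);
        dlm_lock_sync(message_lockres, DLM_LOCK_CR);    /* wait for a message */
        memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
        process_recvd_msg(thread->mddev, &msg);

        dlm_unlock_sync(ack_lockres);                   /* drop CR on ACK: "message read" */
        dlm_lock_sync(message_lockres, DLM_LOCK_PR);    /* up-convert to PR on MESSAGE */
        dlm_lock_sync(ack_lockres, DLM_LOCK_CR);        /* re-take CR on ACK for next round */
        dlm_unlock_sync(message_lockres);               /* release MESSAGE */
        mutex_unlock(&cinfo->recv_mutex);
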
696 error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); in lock_token()
698 pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", in lock_token()
702 mutex_lock(&cinfo->recv_mutex); in lock_token()
713 struct mddev *mddev = cinfo->mddev; in lock_comm()
722 &cinfo->state)) { in lock_comm()
724 &cinfo->state); in lock_comm()
726 md_wakeup_thread(mddev->thread); in lock_comm()
730 wait_event(cinfo->wait, in lock_comm()
731 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state)); in lock_comm()
734 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in lock_comm()
740 WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX); in unlock_comm()
741 mutex_unlock(&cinfo->recv_mutex); in unlock_comm()
742 dlm_unlock_sync(cinfo->token_lockres); in unlock_comm()
743 clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state); in unlock_comm()
744 wake_up(&cinfo->wait); in unlock_comm()
751 * 1. Grabs the message lockresource in EX mode
762 int slot = cinfo->slot_number - 1; in __sendmsg()
764 cmsg->slot = cpu_to_le32(slot); in __sendmsg()
766 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX); in __sendmsg()
768 pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error); in __sendmsg()
772 memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg, in __sendmsg()
774 /* down-convert EX to CW on Message */ in __sendmsg()
775 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW); in __sendmsg()
777 pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n", in __sendmsg()
782 /* up-convert CR to EX on Ack */ in __sendmsg()
783 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX); in __sendmsg()
785 pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n", in __sendmsg()
790 /* down-convert EX to CR on Ack */ in __sendmsg()
791 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR); in __sendmsg()
793 pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n", in __sendmsg()
799 while ((unlock_error = dlm_unlock_sync(cinfo->message_lockres))) in __sendmsg()
800 pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n", in __sendmsg()
821 struct md_cluster_info *cinfo = mddev->cluster_info; in gather_all_resync_info()
833 return -ENOMEM; in gather_all_resync_info()
834 if (i == (cinfo->slot_number - 1)) { in gather_all_resync_info()
839 bm_lockres->flags |= DLM_LKF_NOQUEUE; in gather_all_resync_info()
841 if (ret == -EAGAIN) { in gather_all_resync_info()
845 (unsigned long long) cinfo->suspend_lo, in gather_all_resync_info()
846 (unsigned long long) cinfo->suspend_hi, in gather_all_resync_info()
848 cinfo->suspend_from = i; in gather_all_resync_info()
859 /* Read the disk bitmap sb and check if it needs recovery */ in gather_all_resync_info()
860 ret = mddev->bitmap_ops->copy_from_slot(mddev, i, &lo, &hi, false); in gather_all_resync_info()
862 pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); in gather_all_resync_info()
866 if ((hi > 0) && (lo < mddev->recovery_cp)) { in gather_all_resync_info()
867 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in gather_all_resync_info()
868 mddev->recovery_cp = lo; in gather_all_resync_info()
886 return -ENOMEM; in join()
888 INIT_LIST_HEAD(&cinfo->suspend_list); in join()
889 spin_lock_init(&cinfo->suspend_lock); in join()
890 init_completion(&cinfo->completion); in join()
891 set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state); in join()
892 init_waitqueue_head(&cinfo->wait); in join()
893 mutex_init(&cinfo->recv_mutex); in join()
895 mddev->cluster_info = cinfo; in join()
896 cinfo->mddev = mddev; in join()
899 sprintf(str, "%pU", mddev->uuid); in join()
900 ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name, in join()
902 &ops_rv, &cinfo->lockspace); in join()
905 wait_for_completion(&cinfo->completion); in join()
906 if (nodes < cinfo->slot_number) { in join()
907 pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).", in join()
908 cinfo->slot_number, nodes); in join()
909 ret = -ERANGE; in join()
913 ret = -ENOMEM; in join()
914 rcu_assign_pointer(cinfo->recv_thread, in join()
916 if (!cinfo->recv_thread) { in join()
917 pr_err("md-cluster: cannot allocate memory for recv_thread!\n"); in join()
920 cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1); in join()
921 if (!cinfo->message_lockres) in join()
923 cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0); in join()
924 if (!cinfo->token_lockres) in join()
926 cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0); in join()
927 if (!cinfo->no_new_dev_lockres) in join()
930 ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX); in join()
932 ret = -EAGAIN; in join()
933 pr_err("md-cluster: can't join cluster to avoid lock issue\n"); in join()
936 cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0); in join()
937 if (!cinfo->ack_lockres) { in join()
938 ret = -ENOMEM; in join()
942 if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR)) in join()
943 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", in join()
945 dlm_unlock_sync(cinfo->token_lockres); in join()
946 /* get sync CR lock on no-new-dev. */ in join()
947 if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR)) in join()
948 pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret); in join()
951 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); in join()
952 snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1); in join()
953 cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1); in join()
954 if (!cinfo->bitmap_lockres) { in join()
955 ret = -ENOMEM; in join()
958 if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) { in join()
960 ret = -EINVAL; in join()
964 cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); in join()
965 if (!cinfo->resync_lockres) { in join()
966 ret = -ENOMEM; in join()
972 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in join()
973 md_unregister_thread(mddev, &cinfo->recovery_thread); in join()
974 md_unregister_thread(mddev, &cinfo->recv_thread); in join()
975 lockres_free(cinfo->message_lockres); in join()
976 lockres_free(cinfo->token_lockres); in join()
977 lockres_free(cinfo->ack_lockres); in join()
978 lockres_free(cinfo->no_new_dev_lockres); in join()
979 lockres_free(cinfo->resync_lockres); in join()
980 lockres_free(cinfo->bitmap_lockres); in join()
981 if (cinfo->lockspace) in join()
982 dlm_release_lockspace(cinfo->lockspace, 2); in join()
983 mddev->cluster_info = NULL; in join()
990 struct md_cluster_info *cinfo = mddev->cluster_info; in load_bitmaps()
994 pr_err("md-cluster: failed to gather all resyn infos\n"); in load_bitmaps()
995 set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state); in load_bitmaps()
997 if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state)) in load_bitmaps()
998 md_wakeup_thread(cinfo->recv_thread); in load_bitmaps()
1003 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_bitmap()
1017 struct md_cluster_info *cinfo = mddev->cluster_info; in leave()
1030 if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) || in leave()
1031 (mddev->reshape_position != MaxSector && in leave()
1032 test_bit(MD_CLOSING, &mddev->flags))) in leave()
1035 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in leave()
1036 md_unregister_thread(mddev, &cinfo->recovery_thread); in leave()
1037 md_unregister_thread(mddev, &cinfo->recv_thread); in leave()
1038 lockres_free(cinfo->message_lockres); in leave()
1039 lockres_free(cinfo->token_lockres); in leave()
1040 lockres_free(cinfo->ack_lockres); in leave()
1041 lockres_free(cinfo->no_new_dev_lockres); in leave()
1042 lockres_free(cinfo->resync_lockres); in leave()
1043 lockres_free(cinfo->bitmap_lockres); in leave()
1045 dlm_release_lockspace(cinfo->lockspace, 2); in leave()
1051 * DLM starts the slot numbers from 1, whereas cluster-md
1056 struct md_cluster_info *cinfo = mddev->cluster_info; in slot_number()
1058 return cinfo->slot_number - 1; in slot_number()
1064 * If it is already locked, token is in EX mode, and hence lock_token()
1069 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_start()
1077 &cinfo->state); in metadata_update_start()
1079 md_wakeup_thread(mddev->thread); in metadata_update_start()
1081 wait_event(cinfo->wait, in metadata_update_start()
1082 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) || in metadata_update_start()
1083 test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state)); in metadata_update_start()
1086 if (cinfo->token_lockres->mode == DLM_LOCK_EX) { in metadata_update_start()
1087 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in metadata_update_start()
1092 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state); in metadata_update_start()
1098 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_finish()
1102 int raid_slot = -1; in metadata_update_finish()
1109 if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) { in metadata_update_finish()
1110 raid_slot = rdev->desc_nr; in metadata_update_finish()
1117 pr_warn("md-cluster: No good device id found to send\n"); in metadata_update_finish()
1118 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in metadata_update_finish()
1125 struct md_cluster_info *cinfo = mddev->cluster_info; in metadata_update_cancel()
1126 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in metadata_update_cancel()
1132 struct md_cluster_info *cinfo = mddev->cluster_info; in update_bitmap_size()
1147 void *bitmap = mddev->bitmap; in resize_bitmaps()
1152 rv = mddev->bitmap_ops->get_stats(bitmap, &stats); in resize_bitmaps()
1165 for (i = 0; i < mddev->bitmap_info.nodes; i++) { in resize_bitmaps()
1169 if (i == md_cluster_ops->slot_number(mddev)) in resize_bitmaps()
1172 bitmap = mddev->bitmap_ops->get_from_slot(mddev, i); in resize_bitmaps()
1179 rv = mddev->bitmap_ops->get_stats(bitmap, &stats); in resize_bitmaps()
1192 bm_lockres->flags |= DLM_LKF_NOQUEUE; in resize_bitmaps()
1195 mddev->bitmap_ops->set_pages(bitmap, my_pages); in resize_bitmaps()
1204 mddev->bitmap_ops->free(bitmap); in resize_bitmaps()
1209 mddev->bitmap_ops->free(bitmap); in resize_bitmaps()
1211 return -1; in resize_bitmaps()
1219 int current_slot = md_cluster_ops->slot_number(mddev); in cluster_check_sync_size()
1220 int node_num = mddev->bitmap_info.nodes; in cluster_check_sync_size()
1223 void *bitmap = mddev->bitmap; in cluster_check_sync_size()
1229 rv = mddev->bitmap_ops->get_stats(bitmap, &stats); in cluster_check_sync_size()
1239 bitmap = mddev->bitmap_ops->get_from_slot(mddev, i); in cluster_check_sync_size()
1242 return -1; in cluster_check_sync_size()
1252 pr_err("md-cluster: Cannot initialize %s\n", str); in cluster_check_sync_size()
1253 mddev->bitmap_ops->free(bitmap); in cluster_check_sync_size()
1254 return -1; in cluster_check_sync_size()
1256 bm_lockres->flags |= DLM_LKF_NOQUEUE; in cluster_check_sync_size()
1259 mddev->bitmap_ops->update_sb(bitmap); in cluster_check_sync_size()
1262 rv = mddev->bitmap_ops->get_stats(bitmap, &stats); in cluster_check_sync_size()
1264 mddev->bitmap_ops->free(bitmap); in cluster_check_sync_size()
1271 mddev->bitmap_ops->free(bitmap); in cluster_check_sync_size()
1272 return -1; in cluster_check_sync_size()
1274 mddev->bitmap_ops->free(bitmap); in cluster_check_sync_size()
1277 return (my_sync_size == sync_size) ? 0 : -1; in cluster_check_sync_size()
1292 struct md_cluster_info *cinfo = mddev->cluster_info; in update_size()
1296 int raid_slot = -1; in update_size()
1307 if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { in update_size()
1308 raid_slot = rdev->desc_nr; in update_size()
1326 pr_err("md-cluster: No good device id found to send\n"); in update_size()
1343 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in update_size()
1346 ret = mddev->pers->resize(mddev, old_dev_sectors); in update_size()
1357 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_start()
1358 return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev); in resync_start()
1363 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_info_get()
1365 spin_lock_irq(&cinfo->suspend_lock); in resync_info_get()
1366 *lo = cinfo->suspend_lo; in resync_info_get()
1367 *hi = cinfo->suspend_hi; in resync_info_get()
1368 spin_unlock_irq(&cinfo->suspend_lock); in resync_info_get()
1373 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_status_get()
1375 return test_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state); in resync_status_get()
1380 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_start_notify()
1390 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_info_update()
1396 memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info)); in resync_info_update()
1401 add_resync_info(cinfo->bitmap_lockres, lo, hi); in resync_info_update()
1402 /* Re-acquire the lock to refresh LVB */ in resync_info_update()
1403 dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW); in resync_info_update()
1410 * resync_finish (md_reap_sync_thread -> resync_finish) in resync_info_update()
1420 struct md_cluster_info *cinfo = mddev->cluster_info; in resync_finish()
1423 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); in resync_finish()
1429 if (!test_bit(MD_CLOSING, &mddev->flags)) in resync_finish()
1431 dlm_unlock_sync(cinfo->resync_lockres); in resync_finish()
1438 struct md_cluster_info *cinfo = mddev->cluster_info; in area_resyncing()
1442 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) in area_resyncing()
1445 spin_lock_irq(&cinfo->suspend_lock); in area_resyncing()
1446 if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi) in area_resyncing()
1448 spin_unlock_irq(&cinfo->suspend_lock); in area_resyncing()
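
Piecing the excerpt together, the area check is a plain interval-overlap test against the suspended range published by the resyncing node, with a fast path that blocks read balancing entirely while a dead peer is being recovered. Roughly (the signature is reconstructed, not quoted):

static int area_resyncing(struct mddev *mddev, int direction,
                          sector_t lo, sector_t hi)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int ret = 0;

        /* while a failed peer is being recovered, don't read-balance at all */
        if (direction == READ &&
            test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
                return 1;

        spin_lock_irq(&cinfo->suspend_lock);
        if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
                ret = 1;        /* [lo, hi) overlaps the suspended range */
        spin_unlock_irq(&cinfo->suspend_lock);
        return ret;
}
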
1452 /* add_new_disk() - initiates a disk add
1458 struct md_cluster_info *cinfo = mddev->cluster_info; in add_new_disk()
1461 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); in add_new_disk()
1462 char *uuid = sb->device_uuid; in add_new_disk()
1467 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in add_new_disk()
1469 return -EAGAIN; in add_new_disk()
1475 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; in add_new_disk()
1476 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); in add_new_disk()
1477 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; in add_new_disk()
1479 if (ret == -EAGAIN) in add_new_disk()
1480 ret = -ENOENT; in add_new_disk()
1484 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR); in add_new_disk()
1488 * md_wakeup_thread(mddev->thread) in add_new_disk()
1489 * -> conf->thread (raid1d) in add_new_disk()
1490 * -> md_check_recovery -> md_update_sb in add_new_disk()
1491 * -> metadata_update_start/finish in add_new_disk()
1497 set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in add_new_disk()
1498 wake_up(&cinfo->wait); in add_new_disk()
1505 struct md_cluster_info *cinfo = mddev->cluster_info; in add_new_disk_cancel()
1506 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state); in add_new_disk_cancel()
1512 struct md_cluster_info *cinfo = mddev->cluster_info; in new_disk_ack()
1514 if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) { in new_disk_ack()
1515 pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev)); in new_disk_ack()
1516 return -EINVAL; in new_disk_ack()
1520 dlm_unlock_sync(cinfo->no_new_dev_lockres); in new_disk_ack()
1521 complete(&cinfo->newdisk_completion); in new_disk_ack()
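
add_new_disk() and new_disk_ack() are the two ends of the NEWDISK handshake: the adding node sends a NEWDISK message and tries to take no-new-dev in EX with NOQUEUE (turning -EAGAIN into -ENOENT if any peer refuses to drop its CR), while each receiving node fires the uevent shown in process_add_new_disk() and waits for userspace to confirm. That confirmation lands in new_disk_ack(); a sketch of its body follows, where the bool ack parameter is not visible in the excerpt, so treat the signature as an assumption:

        if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
                pr_warn("md-cluster(%s): Spurious cluster confirmation\n",
                        mdname(mddev));
                return -EINVAL;
        }
        if (ack)        /* userspace accepted the disk */
                dlm_unlock_sync(cinfo->no_new_dev_lockres);     /* drop our CR */
        complete(&cinfo->newdisk_completion);   /* let process_add_new_disk() continue */
        return 0;
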
1528 struct md_cluster_info *cinfo = mddev->cluster_info; in remove_disk()
1530 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in remove_disk()
1538 struct md_cluster_info *cinfo = mddev->cluster_info; in lock_all_bitmaps()
1540 cinfo->other_bitmap_lockres = in lock_all_bitmaps()
1541 kcalloc(mddev->bitmap_info.nodes - 1, in lock_all_bitmaps()
1543 if (!cinfo->other_bitmap_lockres) { in lock_all_bitmaps()
1549 for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) { in lock_all_bitmaps()
1555 cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1); in lock_all_bitmaps()
1556 if (!cinfo->other_bitmap_lockres[i]) in lock_all_bitmaps()
1557 return -ENOMEM; in lock_all_bitmaps()
1559 cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE; in lock_all_bitmaps()
1560 ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW); in lock_all_bitmaps()
1562 held = -1; in lock_all_bitmaps()
1571 struct md_cluster_info *cinfo = mddev->cluster_info; in unlock_all_bitmaps()
1575 if (cinfo->other_bitmap_lockres) { in unlock_all_bitmaps()
1576 for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) { in unlock_all_bitmaps()
1577 if (cinfo->other_bitmap_lockres[i]) { in unlock_all_bitmaps()
1578 lockres_free(cinfo->other_bitmap_lockres[i]); in unlock_all_bitmaps()
1581 kfree(cinfo->other_bitmap_lockres); in unlock_all_bitmaps()
1582 cinfo->other_bitmap_lockres = NULL; in unlock_all_bitmaps()
1591 struct mddev *mddev = rdev->mddev; in gather_bitmaps()
1592 struct md_cluster_info *cinfo = mddev->cluster_info; in gather_bitmaps()
1595 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); in gather_bitmaps()
1600 for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) { in gather_bitmaps()
1601 if (sn == (cinfo->slot_number - 1)) in gather_bitmaps()
1603 err = mddev->bitmap_ops->copy_from_slot(mddev, sn, &lo, &hi, false); in gather_bitmaps()
1605 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); in gather_bitmaps()
1608 if ((hi > 0) && (lo < mddev->recovery_cp)) in gather_bitmaps()
1609 mddev->recovery_cp = lo; in gather_bitmaps()
1643 pr_warn("md-cluster: support raid1 and raid10 (limited support)\n"); in cluster_init()