Lines Matching full:migration

822 		/* if there is a migration in progress, let the migration  in dlm_get_lock_resource()
824 * of the MIGRATION mle: either the migrate finished or in dlm_get_lock_resource()
836 mig ? "MIGRATION" : "BLOCK"); in dlm_get_lock_resource()
1591 mlog(0, "migration mle was found (%u->%u)\n", in dlm_master_request_handler()
1824 " from %u for migration\n", in dlm_assert_master_handler()
1829 " from %u for migration, ignoring\n", in dlm_assert_master_handler()
1931 mlog(0, "finishing off migration of lockres %.*s, " in dlm_assert_master_handler()
1977 * ref given by the master / migration request message. in dlm_assert_master_handler()
2119 * prevent migration from starting while we're still asserting in dlm_assert_master_worker()
2120 * our dominance. The reserved ast delays migration. in dlm_assert_master_worker()
2125 "in the middle of migration. Skipping assert, " in dlm_assert_master_worker()
2144 /* Ok, we've asserted ourselves. Let's let migration start. */ in dlm_assert_master_worker()
2493 /* delay migration when the lockres is in MIGRATING state */ in dlm_is_lockres_migratable()
2497 /* delay migration when the lockres is in RECOVERING state */ in dlm_is_lockres_migratable()
2578 * add the migration mle to the list in dlm_migrate_lockres()
2635 * at this point, we have a migration target, an mle in dlm_migrate_lockres()
2645 /* call send_one_lockres with migration flag. in dlm_migrate_lockres()
2647 * migration is starting. */ in dlm_migrate_lockres()
2652 mlog(0, "migration to node %u failed with %d\n", in dlm_migrate_lockres()
2654 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2672 * will be the last one notified, ensuring that the migration in dlm_migrate_lockres()
2675 * master, so it is important that my recovery finds the migration in dlm_migrate_lockres()
2690 mlog(0, "%s:%.*s: timed out during migration\n", in dlm_migrate_lockres()
2695 mlog(0, "%s:%.*s: expected migration " in dlm_migrate_lockres()
2700 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2711 mlog(0, "%s:%.*s: caught signal during migration\n", in dlm_migrate_lockres()
2736 * but migration failed */ in dlm_migrate_lockres()
2755 * Called with the dlm spinlock held, may drop it to do migration, but
2888 mlog(ML_ERROR, "aha. migration target %u just went down\n", in dlm_mark_lockres_migrating()
2919 /* last step in the migration process.
3072 /* during the migration request we short-circuited in dlm_do_migrate_request()
3097 * the migration and this should be the only one found for those scanning the
3173 * when adding a migration mle, we can clear any other mles
3177 * the new migration mle. this way we can hold with the rule
3209 mlog(ML_ERROR, "migration error mle: " in dlm_add_migration_mle()
3231 "migration\n", dlm->name, in dlm_add_migration_mle()
3239 /* now add a migration mle to the tail of the list */ in dlm_add_migration_mle()
3362 /* Everything else is a MIGRATION mle */ in dlm_clean_master_list()
3364 /* The rule for MIGRATION mles is that the master in dlm_clean_master_list()
3379 "migration from %u, the MLE is " in dlm_clean_master_list()
3390 mlog(0, "%s: node %u died during migration from " in dlm_clean_master_list()
3425 * a reference after the migration completes */ in dlm_finish_migration()
3478 * this is integral to migration
3500 * also, if there is a pending migration on this lockres,
3503 * this is how we ensure that migration can proceed with no