Lines Matching full:migration
824 /* if there is a migration in progress, let the migration in dlm_get_lock_resource()
826 * of the MIGRATION mle: either the migrate finished or in dlm_get_lock_resource()
838 mig ? "MIGRATION" : "BLOCK"); in dlm_get_lock_resource()
1593 mlog(0, "migration mle was found (%u->%u)\n", in dlm_master_request_handler()
1826 " from %u for migration\n", in dlm_assert_master_handler()
1831 " from %u for migration, ignoring\n", in dlm_assert_master_handler()
1933 mlog(0, "finishing off migration of lockres %.*s, " in dlm_assert_master_handler()
1979 * ref given by the master / migration request message. in dlm_assert_master_handler()
2121 * prevent migration from starting while we're still asserting in dlm_assert_master_worker()
2122 * our dominance. The reserved ast delays migration. in dlm_assert_master_worker()
2127 "in the middle of migration. Skipping assert, " in dlm_assert_master_worker()
2146 /* Ok, we've asserted ourselves. Let's let migration start. */ in dlm_assert_master_worker()
2495 /* delay migration when the lockres is in MIGRATING state */ in dlm_is_lockres_migratable()
2499 /* delay migration when the lockres is in RECOVERING state */ in dlm_is_lockres_migratable()
2580 * add the migration mle to the list in dlm_migrate_lockres()
2637 * at this point, we have a migration target, an mle in dlm_migrate_lockres()
2647 /* call send_one_lockres with migration flag. in dlm_migrate_lockres()
2649 * migration is starting. */ in dlm_migrate_lockres()
2654 mlog(0, "migration to node %u failed with %d\n", in dlm_migrate_lockres()
2656 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2674 * will be the last one notified, ensuring that the migration in dlm_migrate_lockres()
2677 * master, so it is important that my recovery finds the migration in dlm_migrate_lockres()
2692 mlog(0, "%s:%.*s: timed out during migration\n", in dlm_migrate_lockres()
2697 mlog(0, "%s:%.*s: expected migration " in dlm_migrate_lockres()
2702 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2713 mlog(0, "%s:%.*s: caught signal during migration\n", in dlm_migrate_lockres()
2738 * but migration failed */ in dlm_migrate_lockres()
2757 * Called with the dlm spinlock held, may drop it to do migration, but
2890 mlog(ML_ERROR, "aha. migration target %u just went down\n", in dlm_mark_lockres_migrating()
2921 /* last step in the migration process.
3074 /* during the migration request we short-circuited in dlm_do_migrate_request()
3099 * the migration and this should be the only one found for those scanning the
3175 * when adding a migration mle, we can clear any other mles
3179 * the new migration mle. this way we can hold with the rule
3211 mlog(ML_ERROR, "migration error mle: " in dlm_add_migration_mle()
3233 "migration\n", dlm->name, in dlm_add_migration_mle()
3241 /* now add a migration mle to the tail of the list */ in dlm_add_migration_mle()
3364 /* Everything else is a MIGRATION mle */ in dlm_clean_master_list()
3366 /* The rule for MIGRATION mles is that the master in dlm_clean_master_list()
3381 "migration from %u, the MLE is " in dlm_clean_master_list()
3392 mlog(0, "%s: node %u died during migration from " in dlm_clean_master_list()
3427 * a reference after the migration completes */ in dlm_finish_migration()
3480 * this is integral to migration
3502 * also, if there is a pending migration on this lockres,
3505 * this is how we ensure that migration can proceed with no