Lines matching +full:send +full:- +full:migration
(free-text search hits from fs/ocfs2/dlm/dlmmaster.c in the Linux kernel; each hit is printed as "<file line> <source text> in <enclosing function>()", and non-matching lines are omitted, which is why some comments and statements appear truncated)

1 // SPDX-License-Identifier: GPL-2.0-or-later
58 if (dlm != mle->dlm) in dlm_mle_equal()
61 if (namelen != mle->mnamelen || in dlm_mle_equal()
62 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
118 case -EBADF: in dlm_is_host_down()
119 case -ECONNREFUSED: in dlm_is_host_down()
120 case -ENOTCONN: in dlm_is_host_down()
121 case -ECONNRESET: in dlm_is_host_down()
122 case -EPIPE: in dlm_is_host_down()
123 case -EHOSTDOWN: in dlm_is_host_down()
124 case -EHOSTUNREACH: in dlm_is_host_down()
125 case -ETIMEDOUT: in dlm_is_host_down()
126 case -ECONNABORTED: in dlm_is_host_down()
127 case -ENETDOWN: in dlm_is_host_down()
128 case -ENETUNREACH: in dlm_is_host_down()
129 case -ENETRESET: in dlm_is_host_down()
130 case -ESHUTDOWN: in dlm_is_host_down()
131 case -ENOPROTOOPT: in dlm_is_host_down()
132 case -EINVAL: /* if returned from our tcp code, in dlm_is_host_down()
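
The run of case labels above is the body of a host-down predicate: any of these socket errnos is taken to mean the remote node is effectively gone, so a failed send can be treated as a dead peer rather than a transient fault. A minimal sketch of that shape (the signature and return values are reconstructed from the matched lines, not copied from the source):

static inline int is_host_down(int err)
{
	switch (err) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:		/* our tcp layer returns this when the socket is gone */
		return 1;	/* peer is (or might as well be) down */
	}
	return 0;		/* anything else is not a host-down condition */
}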
151 * when it is created, and since the dlm->spinlock is held at
154 * dlm->mle_hb_events list as soon as heartbeat events are no
164 assert_spin_locked(&dlm->spinlock); in __dlm_mle_attach_hb_events()
166 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
173 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
174 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
181 spin_lock(&dlm->spinlock); in dlm_mle_detach_hb_events()
183 spin_unlock(&dlm->spinlock); in dlm_mle_detach_hb_events()
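
These fragments are the heartbeat-event lifecycle of an mle: it is attached to dlm->mle_hb_events once, at creation, while the caller already holds dlm->spinlock (hence the assert rather than a lock), and detached whenever it is torn down. Because the detach path guards list_del_init() with list_empty(), it is idempotent and safe to call more than once. A condensed restatement of the pairing, using the field names shown above:

/* attach: caller must already hold dlm->spinlock */
static void mle_attach_hb_events(struct dlm_ctxt *dlm,
				 struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

/* detach: takes the lock itself; list_del_init() keeps it idempotent */
static void mle_detach_hb_events(struct dlm_ctxt *dlm,
				 struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
	spin_unlock(&dlm->spinlock);
}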
189 dlm = mle->dlm; in dlm_get_mle_inuse()
191 assert_spin_locked(&dlm->spinlock); in dlm_get_mle_inuse()
192 assert_spin_locked(&dlm->master_lock); in dlm_get_mle_inuse()
193 mle->inuse++; in dlm_get_mle_inuse()
194 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
200 dlm = mle->dlm; in dlm_put_mle_inuse()
202 spin_lock(&dlm->spinlock); in dlm_put_mle_inuse()
203 spin_lock(&dlm->master_lock); in dlm_put_mle_inuse()
204 mle->inuse--; in dlm_put_mle_inuse()
206 spin_unlock(&dlm->master_lock); in dlm_put_mle_inuse()
207 spin_unlock(&dlm->spinlock); in dlm_put_mle_inuse()
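
The pair above layers an "inuse" count on top of the mle's kref: the get side runs with both dlm->spinlock and dlm->master_lock already held and bumps counter and kref together, while the put side acquires both locks itself (in that order) before decrementing and dropping the kref through __dlm_put_mle(). A sketch of the pairing as the matched lines imply it:

/* caller already holds dlm->spinlock and dlm->master_lock */
static void get_mle_inuse(struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&mle->dlm->spinlock);
	assert_spin_locked(&mle->dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

/* put side takes both locks itself, in the documented order */
static void put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);		/* may free the mle */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}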
215 dlm = mle->dlm; in __dlm_put_mle()
217 assert_spin_locked(&dlm->spinlock); in __dlm_put_mle()
218 assert_spin_locked(&dlm->master_lock); in __dlm_put_mle()
219 if (!kref_read(&mle->mle_refs)) { in __dlm_put_mle()
226 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
234 dlm = mle->dlm; in dlm_put_mle()
236 spin_lock(&dlm->spinlock); in dlm_put_mle()
237 spin_lock(&dlm->master_lock); in dlm_put_mle()
239 spin_unlock(&dlm->master_lock); in dlm_put_mle()
240 spin_unlock(&dlm->spinlock); in dlm_put_mle()
245 kref_get(&mle->mle_refs); in dlm_get_mle()
255 assert_spin_locked(&dlm->spinlock); in dlm_init_mle()
257 mle->dlm = dlm; in dlm_init_mle()
258 mle->type = type; in dlm_init_mle()
259 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
260 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
261 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); in dlm_init_mle()
262 spin_lock_init(&mle->spinlock); in dlm_init_mle()
263 init_waitqueue_head(&mle->wq); in dlm_init_mle()
264 atomic_set(&mle->woken, 0); in dlm_init_mle()
265 kref_init(&mle->mle_refs); in dlm_init_mle()
266 bitmap_zero(mle->response_map, O2NM_MAX_NODES); in dlm_init_mle()
267 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
268 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
269 mle->inuse = 0; in dlm_init_mle()
271 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
272 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
273 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
275 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
277 mle->mleres = res; in dlm_init_mle()
278 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
279 mle->mnamelen = res->lockname.len; in dlm_init_mle()
280 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
283 mle->mleres = NULL; in dlm_init_mle()
284 memcpy(mle->mname, name, namelen); in dlm_init_mle()
285 mle->mnamelen = namelen; in dlm_init_mle()
286 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
289 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
290 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
293 bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES); in dlm_init_mle()
294 bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES); in dlm_init_mle()
295 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
296 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
304 assert_spin_locked(&dlm->spinlock); in __dlm_unlink_mle()
305 assert_spin_locked(&dlm->master_lock); in __dlm_unlink_mle()
307 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
308 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
315 assert_spin_locked(&dlm->master_lock); in __dlm_insert_mle()
317 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
318 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
330 assert_spin_locked(&dlm->master_lock); in dlm_find_mle()
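
__dlm_unlink_mle()/__dlm_insert_mle() keep every mle in a hash keyed by the precomputed mnamehash, and dlm_find_mle() (only its lock assertion matched here) is the corresponding bucket walk, comparing candidates with dlm_mle_equal() from the top of the file. A hedged sketch of that find, under dlm->master_lock:

static int find_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry **mle,
		    char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, dlm_lockid_hash(name, namelen));
	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);	/* hand the caller a reference */
		*mle = tmpmle;
		return 1;
	}
	return 0;
}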
348 assert_spin_locked(&dlm->spinlock); in dlm_hb_event_notify_attached()
350 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
362 spin_lock(&mle->spinlock); in dlm_mle_node_down()
364 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
367 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
369 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
376 spin_lock(&mle->spinlock); in dlm_mle_node_up()
378 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
381 set_bit(idx, mle->node_map); in dlm_mle_node_up()
383 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
394 return -ENOMEM; in dlm_init_mle_cache()
409 dlm = mle->dlm; in dlm_mle_release()
411 assert_spin_locked(&dlm->spinlock); in dlm_mle_release()
412 assert_spin_locked(&dlm->master_lock); in dlm_mle_release()
414 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
415 mle->type); in dlm_mle_release()
423 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
452 return -ENOMEM; in dlm_init_master_caches()
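
Both -ENOMEM returns above are slab-cache setup failures; the release path later frees lock names through dlm_lockname_cache (line 513), so the init side must create matching caches. A hedged sketch of that setup (cache names, sizes, and flags are assumptions; DLM_LOCKID_NAME_MAX stands in for the dlm's lock-name size limit):

static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;

int init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("dlm_lockname",
					       DLM_LOCKID_NAME_MAX,
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	kmem_cache_destroy(dlm_lockres_cache);	/* NULL-safe */
	return -ENOMEM;
}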
470 dlm = res->dlm; in dlm_lockres_release()
472 /* This should not happen -- all lockres' have a name in dlm_lockres_release()
474 BUG_ON(!res->lockname.name); in dlm_lockres_release()
476 mlog(0, "destroying lockres %.*s\n", res->lockname.len, in dlm_lockres_release()
477 res->lockname.name); in dlm_lockres_release()
479 atomic_dec(&dlm->res_cur_count); in dlm_lockres_release()
481 if (!hlist_unhashed(&res->hash_node) || in dlm_lockres_release()
482 !list_empty(&res->granted) || in dlm_lockres_release()
483 !list_empty(&res->converting) || in dlm_lockres_release()
484 !list_empty(&res->blocked) || in dlm_lockres_release()
485 !list_empty(&res->dirty) || in dlm_lockres_release()
486 !list_empty(&res->recovering) || in dlm_lockres_release()
487 !list_empty(&res->purge)) { in dlm_lockres_release()
491 res->lockname.len, res->lockname.name, in dlm_lockres_release()
492 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', in dlm_lockres_release()
493 !list_empty(&res->granted) ? 'G' : ' ', in dlm_lockres_release()
494 !list_empty(&res->converting) ? 'C' : ' ', in dlm_lockres_release()
495 !list_empty(&res->blocked) ? 'B' : ' ', in dlm_lockres_release()
496 !list_empty(&res->dirty) ? 'D' : ' ', in dlm_lockres_release()
497 !list_empty(&res->recovering) ? 'R' : ' ', in dlm_lockres_release()
498 !list_empty(&res->purge) ? 'P' : ' '); in dlm_lockres_release()
505 BUG_ON(!hlist_unhashed(&res->hash_node)); in dlm_lockres_release()
506 BUG_ON(!list_empty(&res->granted)); in dlm_lockres_release()
507 BUG_ON(!list_empty(&res->converting)); in dlm_lockres_release()
508 BUG_ON(!list_empty(&res->blocked)); in dlm_lockres_release()
509 BUG_ON(!list_empty(&res->dirty)); in dlm_lockres_release()
510 BUG_ON(!list_empty(&res->recovering)); in dlm_lockres_release()
511 BUG_ON(!list_empty(&res->purge)); in dlm_lockres_release()
513 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); in dlm_lockres_release()
520 kref_put(&res->refs, dlm_lockres_release); in dlm_lockres_put()
530 * res->lockname.name, so be sure to init every field in dlm_init_lockres()
533 qname = (char *) res->lockname.name; in dlm_init_lockres()
536 res->lockname.len = namelen; in dlm_init_lockres()
537 res->lockname.hash = dlm_lockid_hash(name, namelen); in dlm_init_lockres()
539 init_waitqueue_head(&res->wq); in dlm_init_lockres()
540 spin_lock_init(&res->spinlock); in dlm_init_lockres()
541 INIT_HLIST_NODE(&res->hash_node); in dlm_init_lockres()
542 INIT_LIST_HEAD(&res->granted); in dlm_init_lockres()
543 INIT_LIST_HEAD(&res->converting); in dlm_init_lockres()
544 INIT_LIST_HEAD(&res->blocked); in dlm_init_lockres()
545 INIT_LIST_HEAD(&res->dirty); in dlm_init_lockres()
546 INIT_LIST_HEAD(&res->recovering); in dlm_init_lockres()
547 INIT_LIST_HEAD(&res->purge); in dlm_init_lockres()
548 INIT_LIST_HEAD(&res->tracking); in dlm_init_lockres()
549 atomic_set(&res->asts_reserved, 0); in dlm_init_lockres()
550 res->migration_pending = 0; in dlm_init_lockres()
551 res->inflight_locks = 0; in dlm_init_lockres()
552 res->inflight_assert_workers = 0; in dlm_init_lockres()
554 res->dlm = dlm; in dlm_init_lockres()
556 kref_init(&res->refs); in dlm_init_lockres()
558 atomic_inc(&dlm->res_tot_count); in dlm_init_lockres()
559 atomic_inc(&dlm->res_cur_count); in dlm_init_lockres()
562 spin_lock(&res->spinlock); in dlm_init_lockres()
564 spin_unlock(&res->spinlock); in dlm_init_lockres()
566 res->state = DLM_LOCK_RES_IN_PROGRESS; in dlm_init_lockres()
568 res->last_used = 0; in dlm_init_lockres()
570 spin_lock(&dlm->track_lock); in dlm_init_lockres()
571 list_add_tail(&res->tracking, &dlm->tracking_list); in dlm_init_lockres()
572 spin_unlock(&dlm->track_lock); in dlm_init_lockres()
574 memset(res->lvb, 0, DLM_LVB_LEN); in dlm_init_lockres()
575 bitmap_zero(res->refmap, O2NM_MAX_NODES); in dlm_init_lockres()
588 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); in dlm_new_lockres()
589 if (!res->lockname.name) in dlm_new_lockres()
604 assert_spin_locked(&res->spinlock); in dlm_lockres_set_refmap_bit()
606 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, in dlm_lockres_set_refmap_bit()
607 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_set_refmap_bit()
609 set_bit(bit, res->refmap); in dlm_lockres_set_refmap_bit()
615 assert_spin_locked(&res->spinlock); in dlm_lockres_clear_refmap_bit()
617 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, in dlm_lockres_clear_refmap_bit()
618 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_clear_refmap_bit()
620 clear_bit(bit, res->refmap); in dlm_lockres_clear_refmap_bit()
626 res->inflight_locks++; in __dlm_lockres_grab_inflight_ref()
628 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, in __dlm_lockres_grab_inflight_ref()
629 res->lockname.len, res->lockname.name, res->inflight_locks, in __dlm_lockres_grab_inflight_ref()
636 assert_spin_locked(&res->spinlock); in dlm_lockres_grab_inflight_ref()
643 assert_spin_locked(&res->spinlock); in dlm_lockres_drop_inflight_ref()
645 BUG_ON(res->inflight_locks == 0); in dlm_lockres_drop_inflight_ref()
647 res->inflight_locks--; in dlm_lockres_drop_inflight_ref()
649 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, in dlm_lockres_drop_inflight_ref()
650 res->lockname.len, res->lockname.name, res->inflight_locks, in dlm_lockres_drop_inflight_ref()
653 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref()
659 assert_spin_locked(&res->spinlock); in __dlm_lockres_grab_inflight_worker()
660 res->inflight_assert_workers++; in __dlm_lockres_grab_inflight_worker()
662 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_grab_inflight_worker()
663 res->inflight_assert_workers); in __dlm_lockres_grab_inflight_worker()
669 assert_spin_locked(&res->spinlock); in __dlm_lockres_drop_inflight_worker()
670 BUG_ON(res->inflight_assert_workers == 0); in __dlm_lockres_drop_inflight_worker()
671 res->inflight_assert_workers--; in __dlm_lockres_drop_inflight_worker()
672 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n", in __dlm_lockres_drop_inflight_worker()
673 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_drop_inflight_worker()
674 res->inflight_assert_workers); in __dlm_lockres_drop_inflight_worker()
680 spin_lock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
682 spin_unlock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
693 * also, do a lookup in the dlm->master_list to see
723 spin_lock(&dlm->spinlock); in dlm_get_lock_resource()
726 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
727 spin_lock(&tmpres->spinlock); in dlm_get_lock_resource()
734 if (hlist_unhashed(&tmpres->hash_node)) { in dlm_get_lock_resource()
735 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
742 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_get_lock_resource()
744 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_get_lock_resource()
745 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
752 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { in dlm_get_lock_resource()
753 BUG_ON(tmpres->owner == dlm->node_num); in dlm_get_lock_resource()
756 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
765 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
767 spin_lock(&dlm->track_lock); in dlm_get_lock_resource()
768 if (!list_empty(&res->tracking)) in dlm_get_lock_resource()
769 list_del_init(&res->tracking); in dlm_get_lock_resource()
773 res->lockname.len, in dlm_get_lock_resource()
774 res->lockname.name); in dlm_get_lock_resource()
775 spin_unlock(&dlm->track_lock); in dlm_get_lock_resource()
783 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
800 spin_lock(&res->spinlock); in dlm_get_lock_resource()
801 dlm_change_lockres_owner(dlm, res, dlm->node_num); in dlm_get_lock_resource()
804 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
805 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
811 spin_lock(&dlm->master_lock); in dlm_get_lock_resource()
817 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
821 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
822 /* if there is a migration in progress, let the migration in dlm_get_lock_resource()
824 * of the MIGRATION mle: either the migrate finished or in dlm_get_lock_resource()
830 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
831 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
835 dlm->name, namelen, lockid, in dlm_get_lock_resource()
836 mig ? "MIGRATION" : "BLOCK"); in dlm_get_lock_resource()
837 spin_unlock(&dlm->master_lock); in dlm_get_lock_resource()
838 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
857 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
864 bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); in dlm_get_lock_resource()
868 dlm->name, namelen, (char *)lockid, bit); in dlm_get_lock_resource()
889 spin_unlock(&dlm->master_lock); in dlm_get_lock_resource()
890 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
899 "master $RECOVERY lock now\n", dlm->name); in dlm_get_lock_resource()
904 "change\n", dlm->name); in dlm_get_lock_resource()
914 spin_lock(&dlm->spinlock); in dlm_get_lock_resource()
915 bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); in dlm_get_lock_resource()
919 dlm->name, namelen, (char *)lockid, bit); in dlm_get_lock_resource()
923 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
933 ret = -EINVAL; in dlm_get_lock_resource()
934 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
939 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
941 if (mle->master <= nodenum) in dlm_get_lock_resource()
948 "master is %u, keep going\n", dlm->name, namelen, in dlm_get_lock_resource()
949 lockid, nodenum, mle->master); in dlm_get_lock_resource()
959 "request now, blocked=%d\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
960 res->lockname.name, blocked); in dlm_get_lock_resource()
964 dlm->name, res->lockname.len, in dlm_get_lock_resource()
965 res->lockname.name, blocked); in dlm_get_lock_resource()
973 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
974 res->lockname.name, res->owner); in dlm_get_lock_resource()
976 BUG_ON(res->owner == O2NM_MAX_NODES); in dlm_get_lock_resource()
985 spin_lock(&res->spinlock); in dlm_get_lock_resource()
986 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; in dlm_get_lock_resource()
987 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
988 wake_up(&res->wq); in dlm_get_lock_resource()
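
The dlm_get_lock_resource() hits above (lines 723-988) center on a lookup-with-revalidation loop: find the lockres under dlm->spinlock, then take res->spinlock and re-check that nothing changed in the window between the two locks, going back to the top if the resource was purged or is still in a transient state. A stripped-down sketch of that pattern (lookup_lockres() and wait_on_lockres() are illustrative stand-ins, not the file's names):

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = lookup_lockres(dlm, lockid, namelen, hash);	/* returns a ref'd res or NULL */
	spin_unlock(&dlm->spinlock);
	if (!tmpres)
		goto make_new_lockres;	/* allocate and master it ourselves */

	spin_lock(&tmpres->spinlock);

	/* raced with purge: unhashed after we found it */
	if (hlist_unhashed(&tmpres->hash_node)) {
		spin_unlock(&tmpres->spinlock);
		dlm_lockres_put(tmpres);
		goto lookup;
	}

	/* mastery or a ref drop still in flight elsewhere: wait, retry */
	if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN ||
	    (tmpres->state & DLM_LOCK_RES_DROPPING_REF)) {
		spin_unlock(&tmpres->spinlock);
		wait_on_lockres(tmpres);
		dlm_lockres_put(tmpres);
		goto lookup;
	}

	spin_unlock(&tmpres->spinlock);
	/* tmpres is stable and owned: safe to use */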
1016 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1017 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_wait_for_lock_mastery()
1018 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, in dlm_wait_for_lock_mastery()
1019 res->lockname.len, res->lockname.name, res->owner); in dlm_wait_for_lock_mastery()
1020 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1021 /* this will cause the master to re-assert across in dlm_wait_for_lock_mastery()
1023 if (res->owner != dlm->node_num) { in dlm_wait_for_lock_mastery()
1024 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1027 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); in dlm_wait_for_lock_mastery()
1035 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1037 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1038 m = mle->master; in dlm_wait_for_lock_mastery()
1039 map_changed = !bitmap_equal(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1041 voting_done = bitmap_equal(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1048 dlm->name, res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1050 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1053 dlm->name, res->lockname.len, res->lockname.name, in dlm_wait_for_lock_mastery()
1057 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1063 "rechecking now\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1064 res->lockname.name); in dlm_wait_for_lock_mastery()
1069 "for %s:%.*s\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1070 res->lockname.name); in dlm_wait_for_lock_mastery()
1082 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_wait_for_lock_mastery()
1083 if (dlm->node_num <= bit) { in dlm_wait_for_lock_mastery()
1087 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1098 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1103 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1104 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1105 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1107 if (res->owner == O2NM_MAX_NODES) { in dlm_wait_for_lock_mastery()
1108 mlog(0, "%s:%.*s: waiting again\n", dlm->name, in dlm_wait_for_lock_mastery()
1109 res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1112 mlog(0, "done waiting, master is %u\n", res->owner); in dlm_wait_for_lock_mastery()
1119 m = dlm->node_num; in dlm_wait_for_lock_mastery()
1121 res->lockname.len, res->lockname.name, m); in dlm_wait_for_lock_mastery()
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
1137 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1141 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1157 NODE_DOWN = -1,
1169 iter->curnode = -1; in dlm_bitmap_diff_iter_init()
1170 iter->orig_bm = orig_bm; in dlm_bitmap_diff_iter_init()
1171 iter->cur_bm = cur_bm; in dlm_bitmap_diff_iter_init()
1174 p1 = *(iter->orig_bm + i); in dlm_bitmap_diff_iter_init()
1175 p2 = *(iter->cur_bm + i); in dlm_bitmap_diff_iter_init()
1176 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); in dlm_bitmap_diff_iter_init()
1185 if (iter->curnode >= O2NM_MAX_NODES) in dlm_bitmap_diff_iter_next()
1186 return -ENOENT; in dlm_bitmap_diff_iter_next()
1188 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, in dlm_bitmap_diff_iter_next()
1189 iter->curnode+1); in dlm_bitmap_diff_iter_next()
1191 iter->curnode = O2NM_MAX_NODES; in dlm_bitmap_diff_iter_next()
1192 return -ENOENT; in dlm_bitmap_diff_iter_next()
1196 if (test_bit(bit, iter->orig_bm)) in dlm_bitmap_diff_iter_next()
1201 iter->curnode = bit; in dlm_bitmap_diff_iter_next()
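
dlm_bitmap_diff_iter_init()/_next() implement a "what changed" iterator over two node bitmaps: init XORs orig_bm and cur_bm into diff_bm, and next walks the set bits, classifying each as NODE_DOWN when the node was present in the original map (it disappeared) or NODE_UP otherwise, returning -ENOENT once exhausted. A sketch of how dlm_restart_lock_mastery() below consumes it (the enum tag is assumed from the NODE_DOWN declaration above):

struct dlm_bitmap_diff_iter bdi;
enum dlm_node_state_change sc;
int node;

dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
while ((node = dlm_bitmap_diff_iter_next(&bdi, &sc)) >= 0) {
	if (sc == NODE_UP) {
		/* a node joined mid-vote: count it as an implicit "no" */
		clear_bit(node, mle->response_map);
		set_bit(node, mle->vote_map);
	} else {
		/* NODE_DOWN: a possible master died; mastery restarts */
	}
}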
1219 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1221 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1232 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1233 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1237 int lowest = find_first_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1241 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1247 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1253 "now\n", dlm->name, in dlm_restart_lock_mastery()
1254 res->lockname.len, in dlm_restart_lock_mastery()
1255 res->lockname.name, in dlm_restart_lock_mastery()
1269 dlm->name, in dlm_restart_lock_mastery()
1270 res->lockname.len, in dlm_restart_lock_mastery()
1271 res->lockname.name); in dlm_restart_lock_mastery()
1272 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1273 mle->mleres = res; in dlm_restart_lock_mastery()
1280 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); in dlm_restart_lock_mastery()
1281 bitmap_zero(mle->response_map, O2NM_MAX_NODES); in dlm_restart_lock_mastery()
1283 bitmap_copy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1286 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1287 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1289 ret = -EAGAIN; in dlm_restart_lock_mastery()
1300 * -errno on a network error
1309 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1314 request.node_idx = dlm->node_num; in dlm_do_master_request()
1316 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1318 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1319 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1322 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, in dlm_do_master_request()
1325 if (ret == -ESRCH) { in dlm_do_master_request()
1329 } else if (ret == -EINVAL) { in dlm_do_master_request()
1332 } else if (ret == -ENOMEM) { in dlm_do_master_request()
1333 mlog(ML_ERROR, "out of memory while trying to send " in dlm_do_master_request()
1352 spin_lock(&mle->spinlock); in dlm_do_master_request()
1355 set_bit(to, mle->response_map); in dlm_do_master_request()
1358 "reference\n", dlm->name, res->lockname.len, in dlm_do_master_request()
1359 res->lockname.name, to); in dlm_do_master_request()
1360 mle->master = to; in dlm_do_master_request()
1364 set_bit(to, mle->response_map); in dlm_do_master_request()
1368 set_bit(to, mle->response_map); in dlm_do_master_request()
1369 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1380 spin_unlock(&mle->spinlock); in dlm_do_master_request()
1393 * dlm->spinlock
1394 * res->spinlock
1395 * mle->spinlock
1396 * dlm->master_list
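
The comment above is the file's locking hierarchy for this handler. Where several of these are needed at once, the code below nests them dlm->spinlock, then res->spinlock, then dlm->master_lock, then mle->spinlock, and releases in strict reverse order (see lines 1433-1543). A schematic of a compliant critical section:

spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
spin_lock(&dlm->master_lock);
spin_lock(&mle->spinlock);

/* ... examine and update mastery state ... */

spin_unlock(&mle->spinlock);
spin_unlock(&dlm->master_lock);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);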
1406 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; in dlm_master_request_handler()
1423 name = request->name; in dlm_master_request_handler()
1424 namelen = request->namelen; in dlm_master_request_handler()
1433 spin_lock(&dlm->spinlock); in dlm_master_request_handler()
1436 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1439 spin_lock(&res->spinlock); in dlm_master_request_handler()
1446 if (hlist_unhashed(&res->hash_node)) { in dlm_master_request_handler()
1447 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1452 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_master_request_handler()
1454 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1463 if (res->owner == dlm->node_num) { in dlm_master_request_handler()
1464 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); in dlm_master_request_handler()
1465 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1478 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1479 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1480 // mlog(0, "node %u is the master\n", res->owner); in dlm_master_request_handler()
1490 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_master_request_handler()
1492 "in-progress!\n"); in dlm_master_request_handler()
1497 spin_lock(&dlm->master_lock); in dlm_master_request_handler()
1504 spin_lock(&tmpmle->spinlock); in dlm_master_request_handler()
1505 if (tmpmle->type == DLM_MLE_BLOCK) { in dlm_master_request_handler()
1509 } else if (tmpmle->type == DLM_MLE_MIGRATION) { in dlm_master_request_handler()
1511 "node %u.\n", tmpmle->master, tmpmle->new_master); in dlm_master_request_handler()
1512 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1515 tmpmle->new_master); in dlm_master_request_handler()
1521 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1523 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1530 request->node_idx); in dlm_master_request_handler()
1539 set_bit(request->node_idx, tmpmle->maybe_map); in dlm_master_request_handler()
1540 spin_unlock(&tmpmle->spinlock); in dlm_master_request_handler()
1542 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1543 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1558 spin_lock(&dlm->master_lock); in dlm_master_request_handler()
1564 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1565 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1570 mlog_errno(-ENOMEM); in dlm_master_request_handler()
1579 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1583 spin_lock(&tmpmle->spinlock); in dlm_master_request_handler()
1584 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1588 if (tmpmle->type == DLM_MLE_BLOCK) in dlm_master_request_handler()
1590 else if (tmpmle->type == DLM_MLE_MIGRATION) { in dlm_master_request_handler()
1591 mlog(0, "migration mle was found (%u->%u)\n", in dlm_master_request_handler()
1592 tmpmle->master, tmpmle->new_master); in dlm_master_request_handler()
1597 set_bit(request->node_idx, tmpmle->maybe_map); in dlm_master_request_handler()
1598 spin_unlock(&tmpmle->spinlock); in dlm_master_request_handler()
1600 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1601 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1616 dlm->node_num, res->lockname.len, res->lockname.name); in dlm_master_request_handler()
1617 spin_lock(&res->spinlock); in dlm_master_request_handler()
1618 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, in dlm_master_request_handler()
1623 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1628 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1648 * and re-assert across the cluster...
1659 const char *lockname = res->lockname.name; in dlm_do_assert_master()
1660 unsigned int namelen = res->lockname.len; in dlm_do_assert_master()
1664 spin_lock(&res->spinlock); in dlm_do_assert_master()
1665 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1666 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1680 assert.node_idx = dlm->node_num; in dlm_do_assert_master()
1685 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, in dlm_do_assert_master()
1690 DLM_ASSERT_MASTER_MSG, dlm->key, to); in dlm_do_assert_master()
1704 spin_lock(&dlm->spinlock); in dlm_do_assert_master()
1705 spin_lock(&dlm->master_lock); in dlm_do_assert_master()
1711 spin_unlock(&dlm->master_lock); in dlm_do_assert_master()
1712 spin_unlock(&dlm->spinlock); in dlm_do_assert_master()
1725 "nodes and requests a re-assert\n", in dlm_do_assert_master()
1733 spin_lock(&res->spinlock); in dlm_do_assert_master()
1735 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1742 spin_lock(&res->spinlock); in dlm_do_assert_master()
1743 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1744 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1745 wake_up(&res->wq); in dlm_do_assert_master()
1752 * dlm->spinlock
1753 * res->spinlock
1754 * mle->spinlock
1755 * dlm->master_list
1764 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; in dlm_assert_master_handler()
1775 name = assert->name; in dlm_assert_master_handler()
1776 namelen = assert->namelen; in dlm_assert_master_handler()
1778 flags = be32_to_cpu(assert->flags); in dlm_assert_master_handler()
1785 spin_lock(&dlm->spinlock); in dlm_assert_master_handler()
1791 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
1793 /* not an error, could be master just re-asserting */ in dlm_assert_master_handler()
1795 "MLE for it! (%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1798 int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_assert_master_handler()
1801 * could be master just re-asserting. */ in dlm_assert_master_handler()
1803 "is asserting! (%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1805 } else if (bit != assert->node_idx) { in dlm_assert_master_handler()
1808 "back off\n", assert->node_idx, bit); in dlm_assert_master_handler()
1817 assert->node_idx, namelen, name, bit, in dlm_assert_master_handler()
1818 assert->node_idx); in dlm_assert_master_handler()
1821 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1824 " from %u for migration\n", in dlm_assert_master_handler()
1825 dlm->name, namelen, name, in dlm_assert_master_handler()
1826 assert->node_idx); in dlm_assert_master_handler()
1829 " from %u for migration, ignoring\n", in dlm_assert_master_handler()
1830 dlm->name, namelen, name, in dlm_assert_master_handler()
1831 assert->node_idx); in dlm_assert_master_handler()
1833 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1834 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
1839 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1845 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1846 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_assert_master_handler()
1848 "RECOVERING!\n", assert->node_idx, namelen, name); in dlm_assert_master_handler()
1852 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && in dlm_assert_master_handler()
1853 res->owner != assert->node_idx) { in dlm_assert_master_handler()
1856 assert->node_idx, res->owner, namelen, in dlm_assert_master_handler()
1861 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1862 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_assert_master_handler()
1863 /* owner is just re-asserting */ in dlm_assert_master_handler()
1864 if (res->owner == assert->node_idx) { in dlm_assert_master_handler()
1865 mlog(0, "owner %u re-asserting on " in dlm_assert_master_handler()
1866 "lock %.*s\n", assert->node_idx, in dlm_assert_master_handler()
1872 "(%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1873 res->owner, namelen, name); in dlm_assert_master_handler()
1876 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_assert_master_handler()
1879 "in-progress! (%.*s)\n", in dlm_assert_master_handler()
1880 assert->node_idx, in dlm_assert_master_handler()
1884 } else /* mle->type == DLM_MLE_MIGRATION */ { in dlm_assert_master_handler()
1886 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1890 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1891 mle->master, namelen, name); in dlm_assert_master_handler()
1897 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1901 // assert->node_idx); in dlm_assert_master_handler()
1904 int nn = -1; in dlm_assert_master_handler()
1907 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1908 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1912 * then the calling node needs to re-assert to clear in dlm_assert_master_handler()
1914 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1916 if (nn != dlm->node_num && nn != assert->node_idx) { in dlm_assert_master_handler()
1922 mle->master = assert->node_idx; in dlm_assert_master_handler()
1923 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1924 wake_up(&mle->wq); in dlm_assert_master_handler()
1925 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1929 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1930 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1931 mlog(0, "finishing off migration of lockres %.*s, " in dlm_assert_master_handler()
1933 res->lockname.len, res->lockname.name, in dlm_assert_master_handler()
1934 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1935 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_assert_master_handler()
1937 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1938 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); in dlm_assert_master_handler()
1940 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1942 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1945 wake_up(&res->wq); in dlm_assert_master_handler()
1951 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
1953 rr = kref_read(&mle->mle_refs); in dlm_assert_master_handler()
1954 if (mle->inuse > 0) { in dlm_assert_master_handler()
1968 "inuse=%d\n", dlm->name, namelen, name, in dlm_assert_master_handler()
1969 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1977 * ref given by the master / migration request message. in dlm_assert_master_handler()
1982 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1984 if (res->owner != assert->node_idx) { in dlm_assert_master_handler()
1986 "owner is %u (%.*s), no mle\n", assert->node_idx, in dlm_assert_master_handler()
1987 res->owner, namelen, name); in dlm_assert_master_handler()
1990 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
1995 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1996 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_handler()
1997 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2008 assert->node_idx, dlm->name, namelen, name); in dlm_assert_master_handler()
2015 dlm->name, namelen, name, assert->node_idx); in dlm_assert_master_handler()
2024 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2025 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
2028 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
2029 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
2032 return -EINVAL; in dlm_assert_master_handler()
2040 spin_lock(&res->spinlock); in dlm_assert_master_post_handler()
2041 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_post_handler()
2042 spin_unlock(&res->spinlock); in dlm_assert_master_post_handler()
2043 wake_up(&res->wq); in dlm_assert_master_post_handler()
2056 return -ENOMEM; in dlm_dispatch_assert_master()
2061 item->u.am.lockres = res; /* already have a ref */ in dlm_dispatch_assert_master()
2063 item->u.am.ignore_higher = ignore_higher; in dlm_dispatch_assert_master()
2064 item->u.am.request_from = request_from; in dlm_dispatch_assert_master()
2065 item->u.am.flags = flags; in dlm_dispatch_assert_master()
2068 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, in dlm_dispatch_assert_master()
2069 res->lockname.name); in dlm_dispatch_assert_master()
2071 spin_lock(&dlm->work_lock); in dlm_dispatch_assert_master()
2072 list_add_tail(&item->list, &dlm->work_list); in dlm_dispatch_assert_master()
2073 spin_unlock(&dlm->work_lock); in dlm_dispatch_assert_master()
2075 queue_work(dlm->dlm_worker, &dlm->dispatched_work); in dlm_dispatch_assert_master()
2090 dlm = item->dlm; in dlm_assert_master_worker()
2091 res = item->u.am.lockres; in dlm_assert_master_worker()
2092 ignore_higher = item->u.am.ignore_higher; in dlm_assert_master_worker()
2093 request_from = item->u.am.request_from; in dlm_assert_master_worker()
2094 flags = item->u.am.flags; in dlm_assert_master_worker()
2096 spin_lock(&dlm->spinlock); in dlm_assert_master_worker()
2097 bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES); in dlm_assert_master_worker()
2098 spin_unlock(&dlm->spinlock); in dlm_assert_master_worker()
2100 clear_bit(dlm->node_num, nodemap); in dlm_assert_master_worker()
2103 * this node, do not send the message to the original in dlm_assert_master_worker()
2106 bit = dlm->node_num; in dlm_assert_master_worker()
2119 * prevent migration from starting while we're still asserting in dlm_assert_master_worker()
2120 * our dominance. The reserved ast delays migration. in dlm_assert_master_worker()
2122 spin_lock(&res->spinlock); in dlm_assert_master_worker()
2123 if (res->state & DLM_LOCK_RES_MIGRATING) { in dlm_assert_master_worker()
2125 "in the middle of migration. Skipping assert, " in dlm_assert_master_worker()
2127 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2131 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2136 res->lockname.len, res->lockname.name, dlm->node_num); in dlm_assert_master_worker()
2144 /* Ok, we've asserted ourselves. Let's let migration start. */ in dlm_assert_master_worker()
2157 * lockres because this lockres is used to kick off recovery! ;-)
2158 * So, do a pre-check on all living nodes to see if any of those nodes
2173 spin_lock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2174 dlm_node_iter_init(dlm->domain_map, &iter); in dlm_pre_master_reco_lockres()
2175 spin_unlock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2178 /* do not send to self */ in dlm_pre_master_reco_lockres()
2179 if (nodenum == dlm->node_num) in dlm_pre_master_reco_lockres()
2193 spin_lock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2194 if (test_bit(master, dlm->recovery_map)) { in dlm_pre_master_reco_lockres()
2198 "lock. must wait.\n", dlm->name, in dlm_pre_master_reco_lockres()
2200 ret = -EAGAIN; in dlm_pre_master_reco_lockres()
2202 spin_unlock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2203 mlog(0, "%s: reco lock master is %u\n", dlm->name, in dlm_pre_master_reco_lockres()
2222 lockname = res->lockname.name; in dlm_drop_lockres_ref()
2223 namelen = res->lockname.len; in dlm_drop_lockres_ref()
2227 deref.node_idx = dlm->node_num; in dlm_drop_lockres_ref()
2231 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, in dlm_drop_lockres_ref()
2232 &deref, sizeof(deref), res->owner, &r); in dlm_drop_lockres_ref()
2234 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", in dlm_drop_lockres_ref()
2235 dlm->name, namelen, lockname, ret, res->owner); in dlm_drop_lockres_ref()
2239 dlm->name, namelen, lockname, res->owner, r); in dlm_drop_lockres_ref()
2241 if (r == -ENOMEM) in dlm_drop_lockres_ref()
2253 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; in dlm_deref_lockres_handler()
2257 int ret = -EINVAL; in dlm_deref_lockres_handler()
2267 name = deref->name; in dlm_deref_lockres_handler()
2268 namelen = deref->namelen; in dlm_deref_lockres_handler()
2269 node = deref->node_idx; in dlm_deref_lockres_handler()
2275 if (deref->node_idx >= O2NM_MAX_NODES) { in dlm_deref_lockres_handler()
2282 spin_lock(&dlm->spinlock); in dlm_deref_lockres_handler()
2285 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_handler()
2287 dlm->name, namelen, name); in dlm_deref_lockres_handler()
2290 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_handler()
2292 spin_lock(&res->spinlock); in dlm_deref_lockres_handler()
2293 if (res->state & DLM_LOCK_RES_SETREF_INPROG) in dlm_deref_lockres_handler()
2296 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_handler()
2297 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_handler()
2302 spin_unlock(&res->spinlock); in dlm_deref_lockres_handler()
2309 "but it is already dropped!\n", dlm->name, in dlm_deref_lockres_handler()
2310 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_handler()
2319 ret = -ENOMEM; in dlm_deref_lockres_handler()
2325 item->u.dl.deref_res = res; in dlm_deref_lockres_handler()
2326 item->u.dl.deref_node = node; in dlm_deref_lockres_handler()
2328 spin_lock(&dlm->work_lock); in dlm_deref_lockres_handler()
2329 list_add_tail(&item->list, &dlm->work_list); in dlm_deref_lockres_handler()
2330 spin_unlock(&dlm->work_lock); in dlm_deref_lockres_handler()
2332 queue_work(dlm->dlm_worker, &dlm->dispatched_work); in dlm_deref_lockres_handler()
2348 = (struct dlm_deref_lockres_done *)msg->buf; in dlm_deref_lockres_done_handler()
2352 int ret = -EINVAL; in dlm_deref_lockres_done_handler()
2359 name = deref->name; in dlm_deref_lockres_done_handler()
2360 namelen = deref->namelen; in dlm_deref_lockres_done_handler()
2361 node = deref->node_idx; in dlm_deref_lockres_done_handler()
2367 if (deref->node_idx >= O2NM_MAX_NODES) { in dlm_deref_lockres_done_handler()
2374 spin_lock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2377 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2379 dlm->name, namelen, name); in dlm_deref_lockres_done_handler()
2383 spin_lock(&res->spinlock); in dlm_deref_lockres_done_handler()
2384 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { in dlm_deref_lockres_done_handler()
2385 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2386 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2388 "but it is already derefed!\n", dlm->name, in dlm_deref_lockres_done_handler()
2389 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_done_handler()
2395 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2396 wake_up(&res->wq); in dlm_deref_lockres_done_handler()
2398 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2416 lockname = res->lockname.name; in dlm_drop_lockres_ref_done()
2417 namelen = res->lockname.len; in dlm_drop_lockres_ref_done()
2421 deref.node_idx = dlm->node_num; in dlm_drop_lockres_ref_done()
2425 ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key, in dlm_drop_lockres_ref_done()
2428 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE " in dlm_drop_lockres_ref_done()
2429 " to node %u\n", dlm->name, namelen, in dlm_drop_lockres_ref_done()
2434 dlm->name, namelen, lockname, node, r); in dlm_drop_lockres_ref_done()
2446 dlm = item->dlm; in dlm_deref_lockres_worker()
2447 res = item->u.dl.deref_res; in dlm_deref_lockres_worker()
2448 node = item->u.dl.deref_node; in dlm_deref_lockres_worker()
2450 spin_lock(&res->spinlock); in dlm_deref_lockres_worker()
2451 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_worker()
2453 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_worker()
2457 spin_unlock(&res->spinlock); in dlm_deref_lockres_worker()
2463 dlm->name, res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2467 "but it is already dropped!\n", dlm->name, in dlm_deref_lockres_worker()
2468 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2479 * 3. one or more non-local locks, or, one or more references
2491 assert_spin_locked(&res->spinlock); in dlm_is_lockres_migratable()
2493 /* delay migration when the lockres is in MIGRATING state */ in dlm_is_lockres_migratable()
2494 if (res->state & DLM_LOCK_RES_MIGRATING) in dlm_is_lockres_migratable()
2497 /* delay migration when the lockres is in RECOVERING state */ in dlm_is_lockres_migratable()
2498 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_is_lockres_migratable()
2502 if (res->owner != dlm->node_num) in dlm_is_lockres_migratable()
2508 if (lock->ml.node != dlm->node_num) { in dlm_is_lockres_migratable()
2512 cookie = be64_to_cpu(lock->ml.cookie); in dlm_is_lockres_migratable()
2514 "%s list\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2515 res->lockname.name, in dlm_is_lockres_migratable()
2524 node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES); in dlm_is_lockres_migratable()
2529 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2530 res->lockname.name); in dlm_is_lockres_migratable()
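
dlm_is_lockres_migratable() distills to four checks, all made under res->spinlock: the lockres is not already MIGRATING, not RECOVERING, is mastered locally, and is still interesting to someone else, i.e. it carries a lock from another node or a remote bit in the refmap. A condensed restatement (has_nonlocal_lock() is an illustrative helper standing in for the queue scan shown above):

static int lockres_is_migratable(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	if (res->state & (DLM_LOCK_RES_MIGRATING |
			  DLM_LOCK_RES_RECOVERING))
		return 0;		/* busy: delay the migration */

	if (res->owner != dlm->node_num)
		return 0;		/* only the master may migrate */

	/* migrate only if another node still cares about this lockres */
	if (!has_nonlocal_lock(res) &&
	    find_first_bit(res->refmap, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
		return 0;

	return 1;
}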
2553 return -EINVAL; in dlm_migrate_lockres()
2555 name = res->lockname.name; in dlm_migrate_lockres()
2556 namelen = res->lockname.len; in dlm_migrate_lockres()
2558 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, in dlm_migrate_lockres()
2562 ret = -ENOMEM; in dlm_migrate_lockres()
2578 * add the migration mle to the list in dlm_migrate_lockres()
2580 spin_lock(&dlm->spinlock); in dlm_migrate_lockres()
2581 spin_lock(&dlm->master_lock); in dlm_migrate_lockres()
2583 namelen, target, dlm->node_num); in dlm_migrate_lockres()
2588 if (ret != -EEXIST) in dlm_migrate_lockres()
2591 spin_unlock(&dlm->master_lock); in dlm_migrate_lockres()
2592 spin_unlock(&dlm->spinlock); in dlm_migrate_lockres()
2594 if (ret == -EEXIST) { in dlm_migrate_lockres()
2602 * if we fail after this we need to re-dirty the lockres in dlm_migrate_lockres()
2606 "the target went down.\n", res->lockname.len, in dlm_migrate_lockres()
2607 res->lockname.name, target); in dlm_migrate_lockres()
2608 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2609 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2611 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2612 ret = -EINVAL; in dlm_migrate_lockres()
2616 if (ret != -EEXIST && oldmle) { in dlm_migrate_lockres()
2635 * at this point, we have a migration target, an mle in dlm_migrate_lockres()
2642 flush_workqueue(dlm->dlm_worker); in dlm_migrate_lockres()
2644 /* notify new node and send all lock state */ in dlm_migrate_lockres()
2645 /* call send_one_lockres with migration flag. in dlm_migrate_lockres()
2647 * migration is starting. */ in dlm_migrate_lockres()
2652 mlog(0, "migration to node %u failed with %d\n", in dlm_migrate_lockres()
2654 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2658 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2659 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2661 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2672 * will be the last one notified, ensuring that the migration in dlm_migrate_lockres()
2675 * master, so it is important that my recovery finds the migration in dlm_migrate_lockres()
2681 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2682 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2686 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2687 res->owner == target) in dlm_migrate_lockres()
2690 mlog(0, "%s:%.*s: timed out during migration\n", in dlm_migrate_lockres()
2691 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2695 mlog(0, "%s:%.*s: expected migration " in dlm_migrate_lockres()
2697 dlm->name, res->lockname.len, in dlm_migrate_lockres()
2698 res->lockname.name, target); in dlm_migrate_lockres()
2699 ret = -EINVAL; in dlm_migrate_lockres()
2700 /* migration failed, detach and clean up mle */ in dlm_migrate_lockres()
2704 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2705 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2707 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2711 mlog(0, "%s:%.*s: caught signal during migration\n", in dlm_migrate_lockres()
2712 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2716 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2718 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2720 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2721 wake_up(&res->wq); in dlm_migrate_lockres()
2731 /* re-dirty the lockres if we failed */ in dlm_migrate_lockres()
2736 * but migration failed */ in dlm_migrate_lockres()
2738 wake_up(&res->wq); in dlm_migrate_lockres()
2745 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, in dlm_migrate_lockres()
2755 * Called with the dlm spinlock held, may drop it to do migration, but
2756 * will re-acquire before exit.
2758 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2761 __must_hold(&dlm->spinlock) in dlm_empty_lockres()
2767 assert_spin_locked(&dlm->spinlock); in dlm_empty_lockres()
2769 spin_lock(&res->spinlock); in dlm_empty_lockres()
2772 spin_unlock(&res->spinlock); in dlm_empty_lockres()
2778 spin_unlock(&dlm->spinlock); in dlm_empty_lockres()
2783 dlm->name, res->lockname.len, res->lockname.name, in dlm_empty_lockres()
2785 spin_lock(&dlm->spinlock); in dlm_empty_lockres()
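
dlm_empty_lockres() has an unusual contract, spelled out above: it is entered with dlm->spinlock held, may drop that lock to run the migration, always retakes it before returning, and reports 1 whenever the drop happened. A caller scanning the resource hash must therefore treat a nonzero return as invalidating its iterator and rescan. A hedged sketch of that caller pattern (mirroring how a migrate-everything shutdown loop would consume it):

struct dlm_lock_resource *res;
struct hlist_head *bucket;
int i;

spin_lock(&dlm->spinlock);
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
	bucket = dlm_lockres_hash(dlm, i);
	hlist_for_each_entry(res, bucket, hash_node) {
		/* 1 means dlm->spinlock was dropped and retaken,
		 * so this hlist walk is stale: restart the bucket */
		if (dlm_empty_lockres(dlm, res))
			goto redo_bucket;
	}
}
spin_unlock(&dlm->spinlock);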
2793 spin_lock(&dlm->ast_lock); in dlm_lock_basts_flushed()
2794 spin_lock(&lock->spinlock); in dlm_lock_basts_flushed()
2795 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); in dlm_lock_basts_flushed()
2796 spin_unlock(&lock->spinlock); in dlm_lock_basts_flushed()
2797 spin_unlock(&dlm->ast_lock); in dlm_lock_basts_flushed()
2806 spin_lock(&res->spinlock); in dlm_migration_can_proceed()
2807 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); in dlm_migration_can_proceed()
2808 spin_unlock(&res->spinlock); in dlm_migration_can_proceed()
2812 spin_lock(&dlm->spinlock); in dlm_migration_can_proceed()
2813 if (!test_bit(mig_target, dlm->domain_map)) in dlm_migration_can_proceed()
2815 spin_unlock(&dlm->spinlock); in dlm_migration_can_proceed()
2823 spin_lock(&res->spinlock); in dlm_lockres_is_dirty()
2824 ret = !!(res->state & DLM_LOCK_RES_DIRTY); in dlm_lockres_is_dirty()
2825 spin_unlock(&res->spinlock); in dlm_lockres_is_dirty()
2837 res->lockname.len, res->lockname.name, dlm->node_num, in dlm_mark_lockres_migrating()
2841 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2842 BUG_ON(res->migration_pending); in dlm_mark_lockres_migrating()
2843 res->migration_pending = 1; in dlm_mark_lockres_migrating()
2847 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2853 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2854 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); in dlm_mark_lockres_migrating()
2855 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2856 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2858 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); in dlm_mark_lockres_migrating()
2862 str_yes_no(res->state & DLM_LOCK_RES_DIRTY)); in dlm_mark_lockres_migrating()
2867 ret = wait_event_interruptible_timeout(dlm->migration_wq, in dlm_mark_lockres_migrating()
2872 str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), in dlm_mark_lockres_migrating()
2873 str_no_yes(test_bit(target, dlm->domain_map))); in dlm_mark_lockres_migrating()
2876 str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), in dlm_mark_lockres_migrating()
2877 str_no_yes(test_bit(target, dlm->domain_map))); in dlm_mark_lockres_migrating()
2886 spin_lock(&dlm->spinlock); in dlm_mark_lockres_migrating()
2887 if (!test_bit(target, dlm->domain_map)) { in dlm_mark_lockres_migrating()
2888 mlog(ML_ERROR, "aha. migration target %u just went down\n", in dlm_mark_lockres_migrating()
2890 ret = -EHOSTDOWN; in dlm_mark_lockres_migrating()
2892 spin_unlock(&dlm->spinlock); in dlm_mark_lockres_migrating()
2899 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2900 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); in dlm_mark_lockres_migrating()
2901 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2903 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); in dlm_mark_lockres_migrating()
2905 res->migration_pending = 0; in dlm_mark_lockres_migrating()
2906 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2919 /* last step in the migration process.
2925 struct list_head *queue = &res->granted; in dlm_remove_nonlocal_locks()
2929 assert_spin_locked(&res->spinlock); in dlm_remove_nonlocal_locks()
2931 BUG_ON(res->owner == dlm->node_num); in dlm_remove_nonlocal_locks()
2935 if (lock->ml.node != dlm->node_num) { in dlm_remove_nonlocal_locks()
2937 lock->ml.node); in dlm_remove_nonlocal_locks()
2939 BUG_ON(!list_empty(&lock->ast_list)); in dlm_remove_nonlocal_locks()
2940 BUG_ON(!list_empty(&lock->bast_list)); in dlm_remove_nonlocal_locks()
2941 BUG_ON(lock->ast_pending); in dlm_remove_nonlocal_locks()
2942 BUG_ON(lock->bast_pending); in dlm_remove_nonlocal_locks()
2944 lock->ml.node); in dlm_remove_nonlocal_locks()
2945 list_del_init(&lock->list); in dlm_remove_nonlocal_locks()
2956 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); in dlm_remove_nonlocal_locks()
2961 if (bit != dlm->node_num) { in dlm_remove_nonlocal_locks()
2963 "migrating lockres, clearing\n", dlm->name, in dlm_remove_nonlocal_locks()
2964 res->lockname.len, res->lockname.name, bit); in dlm_remove_nonlocal_locks()
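
dlm_remove_nonlocal_locks() is the cleanup half of a completed migration: once the new master holds the state, every lock belonging to another node is unhooked from the granted/converting/blocked queues, after asserting it has no ASTs or BASTs still queued, and the corresponding refmap bits are cleared, leaving only purely local state behind. A compressed sketch of the queue sweep (some reference-count bookkeeping is elided; field names follow the matched lines):

struct list_head *queues[] = { &res->granted, &res->converting, &res->blocked };
struct dlm_lock *lock, *next;
int i;

assert_spin_locked(&res->spinlock);
for (i = 0; i < ARRAY_SIZE(queues); i++) {
	list_for_each_entry_safe(lock, next, queues[i], list) {
		if (lock->ml.node == dlm->node_num)
			continue;	/* local locks stay with us */
		BUG_ON(!list_empty(&lock->ast_list));
		BUG_ON(!list_empty(&lock->bast_list));
		BUG_ON(lock->ast_pending);
		BUG_ON(lock->bast_pending);
		list_del_init(&lock->list);
		dlm_lock_put(lock);
		/* the remote node no longer holds this lock here */
		dlm_lockres_clear_refmap_bit(dlm, res, lock->ml.node);
	}
}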
2985 assert_spin_locked(&dlm->spinlock); in dlm_pick_migration_target()
2986 assert_spin_locked(&res->spinlock); in dlm_pick_migration_target()
2992 if (lock->ml.node == dlm->node_num) in dlm_pick_migration_target()
2994 if (test_bit(lock->ml.node, dlm->exit_domain_map)) in dlm_pick_migration_target()
2996 nodenum = lock->ml.node; in dlm_pick_migration_target()
3002 noderef = -1; in dlm_pick_migration_target()
3004 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, in dlm_pick_migration_target()
3008 if (noderef == dlm->node_num) in dlm_pick_migration_target()
3010 if (test_bit(noderef, dlm->exit_domain_map)) in dlm_pick_migration_target()
3032 migrate.namelen = res->lockname.len; in dlm_do_migrate_request()
3033 memcpy(migrate.name, res->lockname.name, migrate.namelen); in dlm_do_migrate_request()
3039 /* send message to all nodes, except the master and myself */ in dlm_do_migrate_request()
3046 spin_lock(&dlm->spinlock); in dlm_do_migrate_request()
3047 skip = (!test_bit(nodenum, dlm->domain_map)); in dlm_do_migrate_request()
3048 spin_unlock(&dlm->spinlock); in dlm_do_migrate_request()
3050 clear_bit(nodenum, iter->node_map); in dlm_do_migrate_request()
3054 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, in dlm_do_migrate_request()
3058 mlog(ML_ERROR, "%s: res %.*s, Error %d send " in dlm_do_migrate_request()
3059 "MIGRATE_REQUEST to node %u\n", dlm->name, in dlm_do_migrate_request()
3065 clear_bit(nodenum, iter->node_map); in dlm_do_migrate_request()
3072 /* during the migration request we short-circuited in dlm_do_migrate_request()
3076 dlm->name, res->lockname.len, res->lockname.name, in dlm_do_migrate_request()
3078 spin_lock(&res->spinlock); in dlm_do_migrate_request()
3080 spin_unlock(&res->spinlock); in dlm_do_migrate_request()
3097 * the migration and this should be the only one found for those scanning the
3104 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; in dlm_migrate_request_handler()
3113 name = migrate->name; in dlm_migrate_request_handler()
3114 namelen = migrate->namelen; in dlm_migrate_request_handler()
3121 ret = -ENOMEM; in dlm_migrate_request_handler()
3125 /* check for pre-existing lock */ in dlm_migrate_request_handler()
3126 spin_lock(&dlm->spinlock); in dlm_migrate_request_handler()
3129 spin_lock(&res->spinlock); in dlm_migrate_request_handler()
3130 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_migrate_request_handler()
3134 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3138 ret = -EINVAL; /* need a better solution */ in dlm_migrate_request_handler()
3141 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_migrate_request_handler()
3142 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3145 spin_lock(&dlm->master_lock); in dlm_migrate_request_handler()
3149 migrate->new_master, in dlm_migrate_request_handler()
3150 migrate->master); in dlm_migrate_request_handler()
3155 spin_unlock(&dlm->master_lock); in dlm_migrate_request_handler()
3157 spin_unlock(&dlm->spinlock); in dlm_migrate_request_handler()
3172 /* must be holding dlm->spinlock and dlm->master_lock
3173 * when adding a migration mle, we can clear any other mles
3177 * the new migration mle. this way we can hold with the rule
3191 assert_spin_locked(&dlm->spinlock); in dlm_add_migration_mle()
3192 assert_spin_locked(&dlm->master_lock); in dlm_add_migration_mle()
3198 spin_lock(&tmp->spinlock); in dlm_add_migration_mle()
3199 if (tmp->type == DLM_MLE_MIGRATION) { in dlm_add_migration_mle()
3200 if (master == dlm->node_num) { in dlm_add_migration_mle()
3205 spin_unlock(&tmp->spinlock); in dlm_add_migration_mle()
3206 return -EEXIST; in dlm_add_migration_mle()
3209 mlog(ML_ERROR, "migration error mle: " in dlm_add_migration_mle()
3213 tmp->master, tmp->new_master, in dlm_add_migration_mle()
3220 tmp->master = master; in dlm_add_migration_mle()
3221 atomic_set(&tmp->woken, 1); in dlm_add_migration_mle()
3222 wake_up(&tmp->wq); in dlm_add_migration_mle()
3226 if (tmp->type == DLM_MLE_MASTER) { in dlm_add_migration_mle()
3231 "migration\n", dlm->name, in dlm_add_migration_mle()
3236 spin_unlock(&tmp->spinlock); in dlm_add_migration_mle()
3239 /* now add a migration mle to the tail of the list */ in dlm_add_migration_mle()
3241 mle->new_master = new_master; in dlm_add_migration_mle()
3244 mle->master = master; in dlm_add_migration_mle()
3246 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3261 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3262 mle->mnamehash); in dlm_reset_mleres_owner()
3264 spin_unlock(&dlm->master_lock); in dlm_reset_mleres_owner()
3267 spin_lock(&res->spinlock); in dlm_reset_mleres_owner()
3270 spin_unlock(&res->spinlock); in dlm_reset_mleres_owner()
3277 spin_lock(&dlm->master_lock); in dlm_reset_mleres_owner()
3279 spin_unlock(&dlm->master_lock); in dlm_reset_mleres_owner()
3290 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3292 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3293 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3295 wake_up(&mle->wq); in dlm_clean_migration_mle()
3303 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3305 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3306 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); in dlm_clean_block_mle()
3310 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3317 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3318 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3319 wake_up(&mle->wq); in dlm_clean_block_mle()
3335 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); in dlm_clean_master_list()
3337 assert_spin_locked(&dlm->spinlock); in dlm_clean_master_list()
3340 spin_lock(&dlm->master_lock); in dlm_clean_master_list()
3344 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3345 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3346 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3351 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3357 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3362 /* Everything else is a MIGRATION mle */ in dlm_clean_master_list()
3364 /* The rule for MIGRATION mles is that the master in dlm_clean_master_list()
3373 if (mle->master != dead_node && in dlm_clean_master_list()
3374 mle->new_master != dead_node) in dlm_clean_master_list()
3377 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3379 "migration from %u, the MLE is " in dlm_clean_master_list()
3381 dlm->name, dead_node, in dlm_clean_master_list()
3382 mle->master); in dlm_clean_master_list()
3390 mlog(0, "%s: node %u died during migration from " in dlm_clean_master_list()
3391 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3392 mle->new_master); in dlm_clean_master_list()
3408 spin_unlock(&dlm->master_lock); in dlm_clean_master_list()
3417 spin_lock(&dlm->spinlock); in dlm_finish_migration()
3418 dlm_node_iter_init(dlm->domain_map, &iter); in dlm_finish_migration()
3420 clear_bit(dlm->node_num, iter.node_map); in dlm_finish_migration()
3421 spin_unlock(&dlm->spinlock); in dlm_finish_migration()
3425 * a reference after the migration completes */ in dlm_finish_migration()
3426 spin_lock(&res->spinlock); in dlm_finish_migration()
3428 spin_unlock(&res->spinlock); in dlm_finish_migration()
3432 dlm->node_num, &iter); in dlm_finish_migration()
3439 res->lockname.len, res->lockname.name); in dlm_finish_migration()
3453 res->lockname.len, res->lockname.name, old_master); in dlm_finish_migration()
3465 spin_lock(&res->spinlock); in dlm_finish_migration()
3466 dlm_set_lockres_owner(dlm, res, dlm->node_num); in dlm_finish_migration()
3467 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_finish_migration()
3468 spin_unlock(&res->spinlock); in dlm_finish_migration()
3469 /* re-dirty it on the new master */ in dlm_finish_migration()
3471 wake_up(&res->wq); in dlm_finish_migration()
3478 * this is integral to migration
3487 assert_spin_locked(&res->spinlock); in __dlm_lockres_reserve_ast()
3488 if (res->state & DLM_LOCK_RES_MIGRATING) { in __dlm_lockres_reserve_ast()
3491 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in __dlm_lockres_reserve_ast()
3493 atomic_inc(&res->asts_reserved); in __dlm_lockres_reserve_ast()
3500 * also, if there is a pending migration on this lockres,
3503 * this is how we ensure that migration can proceed with no
3512 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) in dlm_lockres_release_ast()
3515 if (!res->migration_pending) { in dlm_lockres_release_ast()
3516 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3520 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in dlm_lockres_release_ast()
3521 res->migration_pending = 0; in dlm_lockres_release_ast()
3522 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_lockres_release_ast()
3523 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3524 wake_up(&res->wq); in dlm_lockres_release_ast()
3525 wake_up(&dlm->migration_wq); in dlm_lockres_release_ast()
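
The reserve/release pair around asts_reserved is how migration quiesces a lockres: reserving is only legal under res->spinlock and never while MIGRATING is set, and the release side uses atomic_dec_and_lock() so that exactly one releaser, the one dropping the count to zero, re-enters the spinlock, sees a pending migration, and flips the lockres to MIGRATING while waking the waiting migrator. The shape of that handoff:

/* reserve: caller holds res->spinlock; never valid mid-migration */
assert_spin_locked(&res->spinlock);
BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
atomic_inc(&res->asts_reserved);

/* release: atomic_dec_and_lock() returns true only for the decrement
 * that reaches zero, and then res->spinlock is held */
if (atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) {
	if (res->migration_pending) {
		res->migration_pending = 0;
		res->state |= DLM_LOCK_RES_MIGRATING;
		wake_up(&res->wq);
		wake_up(&dlm->migration_wq);
	}
	spin_unlock(&res->spinlock);
}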
3541 spin_lock(&dlm->spinlock); in dlm_force_free_mles()
3542 spin_lock(&dlm->master_lock); in dlm_force_free_mles()
3544 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); in dlm_force_free_mles()
3545 BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES)); in dlm_force_free_mles()
3550 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3554 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3555 wake_up(&mle->wq); in dlm_force_free_mles()
3562 spin_unlock(&dlm->master_lock); in dlm_force_free_mles()
3563 spin_unlock(&dlm->spinlock); in dlm_force_free_mles()