Lines Matching +full:reset +full:-pin +full:-assert +full:-time +full:-ms

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8; -*-
60 if (dlm != mle->dlm) in dlm_mle_equal()
63 if (namelen != mle->mnamelen || in dlm_mle_equal()
64 memcmp(name, mle->mname, namelen) != 0) in dlm_mle_equal()
120 case -EBADF: in dlm_is_host_down()
121 case -ECONNREFUSED: in dlm_is_host_down()
122 case -ENOTCONN: in dlm_is_host_down()
123 case -ECONNRESET: in dlm_is_host_down()
124 case -EPIPE: in dlm_is_host_down()
125 case -EHOSTDOWN: in dlm_is_host_down()
126 case -EHOSTUNREACH: in dlm_is_host_down()
127 case -ETIMEDOUT: in dlm_is_host_down()
128 case -ECONNABORTED: in dlm_is_host_down()
129 case -ENETDOWN: in dlm_is_host_down()
130 case -ENETUNREACH: in dlm_is_host_down()
131 case -ENETRESET: in dlm_is_host_down()
132 case -ESHUTDOWN: in dlm_is_host_down()
133 case -ENOPROTOOPT: in dlm_is_host_down()
134 case -EINVAL: /* if returned from our tcp code, in dlm_is_host_down()
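
The case labels above are the body of an errno-classification helper: any of these socket errors is taken as evidence that the remote node is down. A minimal standalone sketch of the same pattern; the function name and return convention here are illustrative, not the kernel's:

#include <errno.h>

/* Sketch: treat any of these (negative) socket errors as "host down".
 * Mirrors the case-label pattern in dlm_is_host_down() above. */
static int net_err_means_host_down(int err)
{
	switch (err) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:	/* per the comment: only when our own tcp code returns it */
		return 1;
	default:
		return 0;
	}
}
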
153 * when it is created, and since the dlm->spinlock is held at
154 * that time, any heartbeat event will be properly discovered
156 * dlm->mle_hb_events list as soon as heartbeat events are no
166 assert_spin_locked(&dlm->spinlock); in __dlm_mle_attach_hb_events()
168 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); in __dlm_mle_attach_hb_events()
175 if (!list_empty(&mle->hb_events)) in __dlm_mle_detach_hb_events()
176 list_del_init(&mle->hb_events); in __dlm_mle_detach_hb_events()
183 spin_lock(&dlm->spinlock); in dlm_mle_detach_hb_events()
185 spin_unlock(&dlm->spinlock); in dlm_mle_detach_hb_events()
191 dlm = mle->dlm; in dlm_get_mle_inuse()
193 assert_spin_locked(&dlm->spinlock); in dlm_get_mle_inuse()
194 assert_spin_locked(&dlm->master_lock); in dlm_get_mle_inuse()
195 mle->inuse++; in dlm_get_mle_inuse()
196 kref_get(&mle->mle_refs); in dlm_get_mle_inuse()
202 dlm = mle->dlm; in dlm_put_mle_inuse()
204 spin_lock(&dlm->spinlock); in dlm_put_mle_inuse()
205 spin_lock(&dlm->master_lock); in dlm_put_mle_inuse()
206 mle->inuse--; in dlm_put_mle_inuse()
208 spin_unlock(&dlm->master_lock); in dlm_put_mle_inuse()
209 spin_unlock(&dlm->spinlock); in dlm_put_mle_inuse()
217 dlm = mle->dlm; in __dlm_put_mle()
219 assert_spin_locked(&dlm->spinlock); in __dlm_put_mle()
220 assert_spin_locked(&dlm->master_lock); in __dlm_put_mle()
221 if (!kref_read(&mle->mle_refs)) { in __dlm_put_mle()
228 kref_put(&mle->mle_refs, dlm_mle_release); in __dlm_put_mle()
236 dlm = mle->dlm; in dlm_put_mle()
238 spin_lock(&dlm->spinlock); in dlm_put_mle()
239 spin_lock(&dlm->master_lock); in dlm_put_mle()
241 spin_unlock(&dlm->master_lock); in dlm_put_mle()
242 spin_unlock(&dlm->spinlock); in dlm_put_mle()
247 kref_get(&mle->mle_refs); in dlm_get_mle()
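
These helpers layer an inuse counter on top of a struct kref whose release callback tears the object down on the final put. A hedged sketch of that kref lifecycle, with illustrative names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refs;	/* final put triggers obj_release() */
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refs);

	kfree(o);		/* unreachable once the count hits zero */
}

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_NOFS);

	if (o)
		kref_init(&o->refs);	/* count starts at 1 for the creator */
	return o;
}

static void obj_get(struct obj *o)
{
	kref_get(&o->refs);		/* each additional user takes a ref */
}

static void obj_put(struct obj *o)
{
	kref_put(&o->refs, obj_release);
}
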
257 assert_spin_locked(&dlm->spinlock); in dlm_init_mle()
259 mle->dlm = dlm; in dlm_init_mle()
260 mle->type = type; in dlm_init_mle()
261 INIT_HLIST_NODE(&mle->master_hash_node); in dlm_init_mle()
262 INIT_LIST_HEAD(&mle->hb_events); in dlm_init_mle()
263 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_init_mle()
264 spin_lock_init(&mle->spinlock); in dlm_init_mle()
265 init_waitqueue_head(&mle->wq); in dlm_init_mle()
266 atomic_set(&mle->woken, 0); in dlm_init_mle()
267 kref_init(&mle->mle_refs); in dlm_init_mle()
268 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_init_mle()
269 mle->master = O2NM_MAX_NODES; in dlm_init_mle()
270 mle->new_master = O2NM_MAX_NODES; in dlm_init_mle()
271 mle->inuse = 0; in dlm_init_mle()
273 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_init_mle()
274 mle->type != DLM_MLE_MASTER && in dlm_init_mle()
275 mle->type != DLM_MLE_MIGRATION); in dlm_init_mle()
277 if (mle->type == DLM_MLE_MASTER) { in dlm_init_mle()
279 mle->mleres = res; in dlm_init_mle()
280 memcpy(mle->mname, res->lockname.name, res->lockname.len); in dlm_init_mle()
281 mle->mnamelen = res->lockname.len; in dlm_init_mle()
282 mle->mnamehash = res->lockname.hash; in dlm_init_mle()
285 mle->mleres = NULL; in dlm_init_mle()
286 memcpy(mle->mname, name, namelen); in dlm_init_mle()
287 mle->mnamelen = namelen; in dlm_init_mle()
288 mle->mnamehash = dlm_lockid_hash(name, namelen); in dlm_init_mle()
291 atomic_inc(&dlm->mle_tot_count[mle->type]); in dlm_init_mle()
292 atomic_inc(&dlm->mle_cur_count[mle->type]); in dlm_init_mle()
295 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); in dlm_init_mle()
296 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); in dlm_init_mle()
297 clear_bit(dlm->node_num, mle->vote_map); in dlm_init_mle()
298 clear_bit(dlm->node_num, mle->node_map); in dlm_init_mle()
306 assert_spin_locked(&dlm->spinlock); in __dlm_unlink_mle()
307 assert_spin_locked(&dlm->master_lock); in __dlm_unlink_mle()
309 if (!hlist_unhashed(&mle->master_hash_node)) in __dlm_unlink_mle()
310 hlist_del_init(&mle->master_hash_node); in __dlm_unlink_mle()
317 assert_spin_locked(&dlm->master_lock); in __dlm_insert_mle()
319 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
320 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
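
__dlm_insert_mle() and __dlm_unlink_mle() are the standard hlist hash-bucket idiom: hash the lock name, chain at the head of the bucket, and rely on hlist_unhashed() to make unlink idempotent. A minimal sketch; the bucket count and hash field are assumptions:

#include <linux/list.h>

#define NR_BUCKETS 128			/* assumed bucket count */

static struct hlist_head buckets[NR_BUCKETS];

struct entry {
	struct hlist_node hash_node;
	unsigned int hash;		/* precomputed name hash */
};

static void entry_insert(struct entry *e)
{
	/* O(1) insert at the head of the bucket, as __dlm_insert_mle does */
	hlist_add_head(&e->hash_node, &buckets[e->hash % NR_BUCKETS]);
}

static void entry_unlink(struct entry *e)
{
	/* hlist_del_init() leaves the node unhashed, so calling this
	 * twice is a safe no-op, matching __dlm_unlink_mle */
	if (!hlist_unhashed(&e->hash_node))
		hlist_del_init(&e->hash_node);
}
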
332 assert_spin_locked(&dlm->master_lock); in dlm_find_mle()
350 assert_spin_locked(&dlm->spinlock); in dlm_hb_event_notify_attached()
352 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { in dlm_hb_event_notify_attached()
364 spin_lock(&mle->spinlock); in dlm_mle_node_down()
366 if (!test_bit(idx, mle->node_map)) in dlm_mle_node_down()
369 clear_bit(idx, mle->node_map); in dlm_mle_node_down()
371 spin_unlock(&mle->spinlock); in dlm_mle_node_down()
378 spin_lock(&mle->spinlock); in dlm_mle_node_up()
380 if (test_bit(idx, mle->node_map)) in dlm_mle_node_up()
383 set_bit(idx, mle->node_map); in dlm_mle_node_up()
385 spin_unlock(&mle->spinlock); in dlm_mle_node_up()
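
dlm_mle_node_down()/dlm_mle_node_up() flip per-node bits in a membership bitmap under mle->spinlock, skipping transitions that are already reflected. A compact sketch of the guarded bit-flip; MAX_NODES stands in for O2NM_MAX_NODES:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define MAX_NODES 255			/* stand-in for O2NM_MAX_NODES */

struct tracker {
	spinlock_t lock;		/* initialized with spin_lock_init() */
	DECLARE_BITMAP(node_map, MAX_NODES);
};

static void tracker_node_down(struct tracker *t, int idx)
{
	spin_lock(&t->lock);
	if (test_bit(idx, t->node_map))	/* act only on a real up->down edge */
		clear_bit(idx, t->node_map);
	spin_unlock(&t->lock);
}

/* node-up mirrors this: if the bit is clear, set_bit() it */
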
396 return -ENOMEM; in dlm_init_mle_cache()
411 dlm = mle->dlm; in dlm_mle_release()
413 assert_spin_locked(&dlm->spinlock); in dlm_mle_release()
414 assert_spin_locked(&dlm->master_lock); in dlm_mle_release()
416 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
417 mle->type); in dlm_mle_release()
425 atomic_dec(&dlm->mle_cur_count[mle->type]); in dlm_mle_release()
454 return -ENOMEM; in dlm_init_master_caches()
472 dlm = res->dlm; in dlm_lockres_release()
474 /* This should not happen -- all lockres' have a name in dlm_lockres_release()
475 * associated with them at init time. */ in dlm_lockres_release()
476 BUG_ON(!res->lockname.name); in dlm_lockres_release()
478 mlog(0, "destroying lockres %.*s\n", res->lockname.len, in dlm_lockres_release()
479 res->lockname.name); in dlm_lockres_release()
481 atomic_dec(&dlm->res_cur_count); in dlm_lockres_release()
483 if (!hlist_unhashed(&res->hash_node) || in dlm_lockres_release()
484 !list_empty(&res->granted) || in dlm_lockres_release()
485 !list_empty(&res->converting) || in dlm_lockres_release()
486 !list_empty(&res->blocked) || in dlm_lockres_release()
487 !list_empty(&res->dirty) || in dlm_lockres_release()
488 !list_empty(&res->recovering) || in dlm_lockres_release()
489 !list_empty(&res->purge)) { in dlm_lockres_release()
493 res->lockname.len, res->lockname.name, in dlm_lockres_release()
494 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', in dlm_lockres_release()
495 !list_empty(&res->granted) ? 'G' : ' ', in dlm_lockres_release()
496 !list_empty(&res->converting) ? 'C' : ' ', in dlm_lockres_release()
497 !list_empty(&res->blocked) ? 'B' : ' ', in dlm_lockres_release()
498 !list_empty(&res->dirty) ? 'D' : ' ', in dlm_lockres_release()
499 !list_empty(&res->recovering) ? 'R' : ' ', in dlm_lockres_release()
500 !list_empty(&res->purge) ? 'P' : ' '); in dlm_lockres_release()
505 /* By the time we're ready to blow this guy away, we shouldn't in dlm_lockres_release()
507 BUG_ON(!hlist_unhashed(&res->hash_node)); in dlm_lockres_release()
508 BUG_ON(!list_empty(&res->granted)); in dlm_lockres_release()
509 BUG_ON(!list_empty(&res->converting)); in dlm_lockres_release()
510 BUG_ON(!list_empty(&res->blocked)); in dlm_lockres_release()
511 BUG_ON(!list_empty(&res->dirty)); in dlm_lockres_release()
512 BUG_ON(!list_empty(&res->recovering)); in dlm_lockres_release()
513 BUG_ON(!list_empty(&res->purge)); in dlm_lockres_release()
515 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); in dlm_lockres_release()
522 kref_put(&res->refs, dlm_lockres_release); in dlm_lockres_put()
532 * res->lockname.name, so be sure to init every field in dlm_init_lockres()
535 qname = (char *) res->lockname.name; in dlm_init_lockres()
538 res->lockname.len = namelen; in dlm_init_lockres()
539 res->lockname.hash = dlm_lockid_hash(name, namelen); in dlm_init_lockres()
541 init_waitqueue_head(&res->wq); in dlm_init_lockres()
542 spin_lock_init(&res->spinlock); in dlm_init_lockres()
543 INIT_HLIST_NODE(&res->hash_node); in dlm_init_lockres()
544 INIT_LIST_HEAD(&res->granted); in dlm_init_lockres()
545 INIT_LIST_HEAD(&res->converting); in dlm_init_lockres()
546 INIT_LIST_HEAD(&res->blocked); in dlm_init_lockres()
547 INIT_LIST_HEAD(&res->dirty); in dlm_init_lockres()
548 INIT_LIST_HEAD(&res->recovering); in dlm_init_lockres()
549 INIT_LIST_HEAD(&res->purge); in dlm_init_lockres()
550 INIT_LIST_HEAD(&res->tracking); in dlm_init_lockres()
551 atomic_set(&res->asts_reserved, 0); in dlm_init_lockres()
552 res->migration_pending = 0; in dlm_init_lockres()
553 res->inflight_locks = 0; in dlm_init_lockres()
554 res->inflight_assert_workers = 0; in dlm_init_lockres()
556 res->dlm = dlm; in dlm_init_lockres()
558 kref_init(&res->refs); in dlm_init_lockres()
560 atomic_inc(&dlm->res_tot_count); in dlm_init_lockres()
561 atomic_inc(&dlm->res_cur_count); in dlm_init_lockres()
564 spin_lock(&res->spinlock); in dlm_init_lockres()
566 spin_unlock(&res->spinlock); in dlm_init_lockres()
568 res->state = DLM_LOCK_RES_IN_PROGRESS; in dlm_init_lockres()
570 res->last_used = 0; in dlm_init_lockres()
572 spin_lock(&dlm->track_lock); in dlm_init_lockres()
573 list_add_tail(&res->tracking, &dlm->tracking_list); in dlm_init_lockres()
574 spin_unlock(&dlm->track_lock); in dlm_init_lockres()
576 memset(res->lvb, 0, DLM_LVB_LEN); in dlm_init_lockres()
577 memset(res->refmap, 0, sizeof(res->refmap)); in dlm_init_lockres()
590 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); in dlm_new_lockres()
591 if (!res->lockname.name) in dlm_new_lockres()
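
dlm_new_lockres() draws both the lockres and its name buffer from dedicated slab caches. A sketch of the create/zalloc/free cycle; the cache name and 64-byte object size here are assumptions:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *name_cache;

static int name_cache_init(void)
{
	/* one fixed-size slab object per lock name */
	name_cache = kmem_cache_create("example_lockname", 64, 0, 0, NULL);
	return name_cache ? 0 : -ENOMEM;
}

static char *name_alloc(void)
{
	/* zeroed, filesystem-safe allocation, as in dlm_new_lockres */
	return kmem_cache_zalloc(name_cache, GFP_NOFS);
}

static void name_free(const char *name)
{
	/* the release path casts away const, as dlm_lockres_release does */
	kmem_cache_free(name_cache, (void *)name);
}
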
606 assert_spin_locked(&res->spinlock); in dlm_lockres_set_refmap_bit()
608 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, in dlm_lockres_set_refmap_bit()
609 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_set_refmap_bit()
611 set_bit(bit, res->refmap); in dlm_lockres_set_refmap_bit()
617 assert_spin_locked(&res->spinlock); in dlm_lockres_clear_refmap_bit()
619 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, in dlm_lockres_clear_refmap_bit()
620 res->lockname.name, bit, __builtin_return_address(0)); in dlm_lockres_clear_refmap_bit()
622 clear_bit(bit, res->refmap); in dlm_lockres_clear_refmap_bit()
628 res->inflight_locks++; in __dlm_lockres_grab_inflight_ref()
630 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, in __dlm_lockres_grab_inflight_ref()
631 res->lockname.len, res->lockname.name, res->inflight_locks, in __dlm_lockres_grab_inflight_ref()
638 assert_spin_locked(&res->spinlock); in dlm_lockres_grab_inflight_ref()
645 assert_spin_locked(&res->spinlock); in dlm_lockres_drop_inflight_ref()
647 BUG_ON(res->inflight_locks == 0); in dlm_lockres_drop_inflight_ref()
649 res->inflight_locks--; in dlm_lockres_drop_inflight_ref()
651 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, in dlm_lockres_drop_inflight_ref()
652 res->lockname.len, res->lockname.name, res->inflight_locks, in dlm_lockres_drop_inflight_ref()
655 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref()
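
inflight_locks pins the resource: the grab path increments under res->spinlock, and the drop path decrements and wakes res->wq so anyone sleeping until the count reaches zero can recheck. A sketch of that pin/unpin-and-wake pattern; field and function names are assumed, and the lock and waitqueue are presumed initialized elsewhere:

#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct pinned {
	spinlock_t lock;
	unsigned int inflight;
	wait_queue_head_t wq;	/* initialized with init_waitqueue_head() */
};

static void pin_get(struct pinned *p)
{
	spin_lock(&p->lock);
	p->inflight++;			/* holder keeps the object pinned */
	spin_unlock(&p->lock);
}

static void pin_put(struct pinned *p)
{
	spin_lock(&p->lock);
	BUG_ON(p->inflight == 0);	/* unbalanced put is a fatal bug */
	p->inflight--;
	spin_unlock(&p->lock);
	wake_up(&p->wq);		/* let sleepers recheck the count */
}

static int unpinned(struct pinned *p)
{
	int ret;

	spin_lock(&p->lock);
	ret = (p->inflight == 0);
	spin_unlock(&p->lock);
	return ret;
}

/* a purge path would sleep with: wait_event(p->wq, unpinned(p)); */
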
661 assert_spin_locked(&res->spinlock); in __dlm_lockres_grab_inflight_worker()
662 res->inflight_assert_workers++; in __dlm_lockres_grab_inflight_worker()
663 mlog(0, "%s:%.*s: inflight assert worker++: now %u\n", in __dlm_lockres_grab_inflight_worker()
664 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_grab_inflight_worker()
665 res->inflight_assert_workers); in __dlm_lockres_grab_inflight_worker()
671 assert_spin_locked(&res->spinlock); in __dlm_lockres_drop_inflight_worker()
672 BUG_ON(res->inflight_assert_workers == 0); in __dlm_lockres_drop_inflight_worker()
673 res->inflight_assert_workers--; in __dlm_lockres_drop_inflight_worker()
674 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n", in __dlm_lockres_drop_inflight_worker()
675 dlm->name, res->lockname.len, res->lockname.name, in __dlm_lockres_drop_inflight_worker()
676 res->inflight_assert_workers); in __dlm_lockres_drop_inflight_worker()
682 spin_lock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
684 spin_unlock(&res->spinlock); in dlm_lockres_drop_inflight_worker()
695 * also, do a lookup in the dlm->master_list to see
725 spin_lock(&dlm->spinlock); in dlm_get_lock_resource()
728 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
729 spin_lock(&tmpres->spinlock); in dlm_get_lock_resource()
736 if (hlist_unhashed(&tmpres->hash_node)) { in dlm_get_lock_resource()
737 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
744 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_get_lock_resource()
746 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); in dlm_get_lock_resource()
747 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
754 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { in dlm_get_lock_resource()
755 BUG_ON(tmpres->owner == dlm->node_num); in dlm_get_lock_resource()
758 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
764 /* Grab inflight ref to pin the resource */ in dlm_get_lock_resource()
767 spin_unlock(&tmpres->spinlock); in dlm_get_lock_resource()
769 spin_lock(&dlm->track_lock); in dlm_get_lock_resource()
770 if (!list_empty(&res->tracking)) in dlm_get_lock_resource()
771 list_del_init(&res->tracking); in dlm_get_lock_resource()
775 res->lockname.len, in dlm_get_lock_resource()
776 res->lockname.name); in dlm_get_lock_resource()
777 spin_unlock(&dlm->track_lock); in dlm_get_lock_resource()
785 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
802 spin_lock(&res->spinlock); in dlm_get_lock_resource()
803 dlm_change_lockres_owner(dlm, res, dlm->node_num); in dlm_get_lock_resource()
806 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
807 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
813 spin_lock(&dlm->master_lock); in dlm_get_lock_resource()
819 if (mle->type == DLM_MLE_MASTER) { in dlm_get_lock_resource()
823 mig = (mle->type == DLM_MLE_MIGRATION); in dlm_get_lock_resource()
832 if (mig || mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
833 BUG_ON(mig && mle->master == dlm->node_num); in dlm_get_lock_resource()
837 dlm->name, namelen, lockid, in dlm_get_lock_resource()
839 spin_unlock(&dlm->master_lock); in dlm_get_lock_resource()
840 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
859 set_bit(dlm->node_num, mle->maybe_map); in dlm_get_lock_resource()
866 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); in dlm_get_lock_resource()
870 dlm->name, namelen, (char *)lockid, bit); in dlm_get_lock_resource()
888 * ref at this time in the assert master handler, so we in dlm_get_lock_resource()
891 spin_unlock(&dlm->master_lock); in dlm_get_lock_resource()
892 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
901 "master $RECOVERY lock now\n", dlm->name); in dlm_get_lock_resource()
905 mlog(0, "%s: waiting 500ms for heartbeat state " in dlm_get_lock_resource()
906 "change\n", dlm->name); in dlm_get_lock_resource()
916 spin_lock(&dlm->spinlock); in dlm_get_lock_resource()
917 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); in dlm_get_lock_resource()
921 dlm->name, namelen, (char *)lockid, bit); in dlm_get_lock_resource()
925 spin_unlock(&dlm->spinlock); in dlm_get_lock_resource()
935 ret = -EINVAL; in dlm_get_lock_resource()
936 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource()
941 if (mle->master != O2NM_MAX_NODES) { in dlm_get_lock_resource()
943 if (mle->master <= nodenum) in dlm_get_lock_resource()
950 "master is %u, keep going\n", dlm->name, namelen, in dlm_get_lock_resource()
951 lockid, nodenum, mle->master); in dlm_get_lock_resource()
961 "request now, blocked=%d\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
962 res->lockname.name, blocked); in dlm_get_lock_resource()
966 dlm->name, res->lockname.len, in dlm_get_lock_resource()
967 res->lockname.name, blocked); in dlm_get_lock_resource()
975 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, in dlm_get_lock_resource()
976 res->lockname.name, res->owner); in dlm_get_lock_resource()
978 BUG_ON(res->owner == O2NM_MAX_NODES); in dlm_get_lock_resource()
987 spin_lock(&res->spinlock); in dlm_get_lock_resource()
988 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; in dlm_get_lock_resource()
989 spin_unlock(&res->spinlock); in dlm_get_lock_resource()
990 wake_up(&res->wq); in dlm_get_lock_resource()
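
The lookup path at the top of dlm_get_lock_resource() is the classic lookup-or-allocate dance: search the hash under dlm->spinlock, and on a miss drop the lock, allocate, then retake the lock and re-lookup before inserting, since another thread may have raced in. A condensed, single-bucket sketch under assumed names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct table {
	spinlock_t lock;
	struct hlist_head head;		/* one bucket, for brevity */
};

struct entry {
	struct hlist_node node;
	u32 key;
};

static struct entry *lookup(struct table *tb, u32 key)
{
	struct entry *e;

	hlist_for_each_entry(e, &tb->head, node)
		if (e->key == key)
			return e;
	return NULL;
}

static struct entry *get_or_create(struct table *tb, u32 key)
{
	struct entry *e, *new = NULL;

retry:
	spin_lock(&tb->lock);
	e = lookup(tb, key);
	if (e) {
		spin_unlock(&tb->lock);
		kfree(new);		/* another thread won the race */
		return e;
	}
	if (!new) {
		/* allocate with the lock dropped, then re-lookup */
		spin_unlock(&tb->lock);
		new = kzalloc(sizeof(*new), GFP_NOFS);
		if (!new)
			return NULL;
		new->key = key;
		goto retry;
	}
	hlist_add_head(&new->node, &tb->head);
	spin_unlock(&tb->lock);
	return new;
}
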
1011 int assert, sleep; in dlm_wait_for_lock_mastery() local
1015 assert = 0; in dlm_wait_for_lock_mastery()
1018 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1019 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_wait_for_lock_mastery()
1020 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, in dlm_wait_for_lock_mastery()
1021 res->lockname.len, res->lockname.name, res->owner); in dlm_wait_for_lock_mastery()
1022 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1023 /* this will cause the master to re-assert across in dlm_wait_for_lock_mastery()
1025 if (res->owner != dlm->node_num) { in dlm_wait_for_lock_mastery()
1026 ret = dlm_do_master_request(res, mle, res->owner); in dlm_wait_for_lock_mastery()
1029 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); in dlm_wait_for_lock_mastery()
1037 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
1039 spin_lock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1040 m = mle->master; in dlm_wait_for_lock_mastery()
1041 map_changed = (memcmp(mle->vote_map, mle->node_map, in dlm_wait_for_lock_mastery()
1042 sizeof(mle->vote_map)) != 0); in dlm_wait_for_lock_mastery()
1043 voting_done = (memcmp(mle->vote_map, mle->response_map, in dlm_wait_for_lock_mastery()
1044 sizeof(mle->vote_map)) == 0); in dlm_wait_for_lock_mastery()
1050 dlm->name, res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1052 b = (mle->type == DLM_MLE_BLOCK); in dlm_wait_for_lock_mastery()
1055 dlm->name, res->lockname.len, res->lockname.name, in dlm_wait_for_lock_mastery()
1059 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1065 "rechecking now\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1066 res->lockname.name); in dlm_wait_for_lock_mastery()
1071 "for %s:%.*s\n", dlm->name, res->lockname.len, in dlm_wait_for_lock_mastery()
1072 res->lockname.name); in dlm_wait_for_lock_mastery()
1077 /* another node has done an assert! in dlm_wait_for_lock_mastery()
1084 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_wait_for_lock_mastery()
1085 if (dlm->node_num <= bit) { in dlm_wait_for_lock_mastery()
1089 mle->master = dlm->node_num; in dlm_wait_for_lock_mastery()
1092 assert = 1; in dlm_wait_for_lock_mastery()
1096 * an assert master yet, we must sleep */ in dlm_wait_for_lock_mastery()
1100 spin_unlock(&mle->spinlock); in dlm_wait_for_lock_mastery()
1105 atomic_set(&mle->woken, 0); in dlm_wait_for_lock_mastery()
1106 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery()
1107 (atomic_read(&mle->woken) == 1), in dlm_wait_for_lock_mastery()
1109 if (res->owner == O2NM_MAX_NODES) { in dlm_wait_for_lock_mastery()
1110 mlog(0, "%s:%.*s: waiting again\n", dlm->name, in dlm_wait_for_lock_mastery()
1111 res->lockname.len, res->lockname.name); in dlm_wait_for_lock_mastery()
1114 mlog(0, "done waiting, master is %u\n", res->owner); in dlm_wait_for_lock_mastery()
1120 if (assert) { in dlm_wait_for_lock_mastery()
1121 m = dlm->node_num; in dlm_wait_for_lock_mastery()
1123 res->lockname.len, res->lockname.name, m); in dlm_wait_for_lock_mastery()
1124 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); in dlm_wait_for_lock_mastery()
1139 spin_lock(&res->spinlock); in dlm_wait_for_lock_mastery()
1143 spin_unlock(&res->spinlock); in dlm_wait_for_lock_mastery()
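
The sleep in this function pairs wait_event_timeout() with an atomic woken flag that the assert-master handler sets before calling wake_up(), so a wakeup racing with the re-arm is never lost. A sketch of the handshake; the 5-second timeout is illustrative:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct waiter {
	atomic_t woken;
	wait_queue_head_t wq;	/* initialized with init_waitqueue_head() */
};

/* sleeping side: re-arm the flag first, then wait up to 5s */
static void waiter_sleep(struct waiter *w)
{
	atomic_set(&w->woken, 0);
	(void)wait_event_timeout(w->wq,
				 atomic_read(&w->woken) == 1,
				 msecs_to_jiffies(5000));
}

/* waking side: set the flag before wake_up() so the condition is
 * already true when the waiter re-evaluates it */
static void waiter_kick(struct waiter *w)
{
	atomic_set(&w->woken, 1);
	wake_up(&w->wq);
}
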
1159 NODE_DOWN = -1,
1171 iter->curnode = -1; in dlm_bitmap_diff_iter_init()
1172 iter->orig_bm = orig_bm; in dlm_bitmap_diff_iter_init()
1173 iter->cur_bm = cur_bm; in dlm_bitmap_diff_iter_init()
1176 p1 = *(iter->orig_bm + i); in dlm_bitmap_diff_iter_init()
1177 p2 = *(iter->cur_bm + i); in dlm_bitmap_diff_iter_init()
1178 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); in dlm_bitmap_diff_iter_init()
1187 if (iter->curnode >= O2NM_MAX_NODES) in dlm_bitmap_diff_iter_next()
1188 return -ENOENT; in dlm_bitmap_diff_iter_next()
1190 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, in dlm_bitmap_diff_iter_next()
1191 iter->curnode+1); in dlm_bitmap_diff_iter_next()
1193 iter->curnode = O2NM_MAX_NODES; in dlm_bitmap_diff_iter_next()
1194 return -ENOENT; in dlm_bitmap_diff_iter_next()
1198 if (test_bit(bit, iter->orig_bm)) in dlm_bitmap_diff_iter_next()
1203 iter->curnode = bit; in dlm_bitmap_diff_iter_next()
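
The diff iterator XORs the original and current node maps and then walks the set bits with find_next_bit(); a bit set in the original map is a node that went down, otherwise one that came up (NODE_DOWN/NODE_UP above). A condensed sketch, with the two callbacks assumed to exist elsewhere:

#include <linux/bitmap.h>

#define MAX_NODES 255			/* stand-in for O2NM_MAX_NODES */

/* assumed callbacks, defined elsewhere */
static void handle_node_down(int node);
static void handle_node_up(int node);

static void walk_diff(const unsigned long *orig, const unsigned long *cur)
{
	DECLARE_BITMAP(diff, MAX_NODES);
	int bit;

	/* a bit differs iff the node changed state between snapshots */
	bitmap_xor(diff, orig, cur, MAX_NODES);

	for (bit = find_next_bit(diff, MAX_NODES, 0);
	     bit < MAX_NODES;
	     bit = find_next_bit(diff, MAX_NODES, bit + 1)) {
		if (test_bit(bit, orig))
			handle_node_down(bit);	/* was up, now down */
		else
			handle_node_up(bit);	/* was down, now up */
	}
}
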
1221 assert_spin_locked(&mle->spinlock); in dlm_restart_lock_mastery()
1223 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); in dlm_restart_lock_mastery()
1234 clear_bit(node, mle->response_map); in dlm_restart_lock_mastery()
1235 set_bit(node, mle->vote_map); in dlm_restart_lock_mastery()
1239 int lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1243 clear_bit(node, mle->maybe_map); in dlm_restart_lock_mastery()
1249 lowest = find_next_bit(mle->maybe_map, in dlm_restart_lock_mastery()
1255 "now\n", dlm->name, in dlm_restart_lock_mastery()
1256 res->lockname.len, in dlm_restart_lock_mastery()
1257 res->lockname.name, in dlm_restart_lock_mastery()
1271 dlm->name, in dlm_restart_lock_mastery()
1272 res->lockname.len, in dlm_restart_lock_mastery()
1273 res->lockname.name); in dlm_restart_lock_mastery()
1274 mle->type = DLM_MLE_MASTER; in dlm_restart_lock_mastery()
1275 mle->mleres = res; in dlm_restart_lock_mastery()
1282 memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); in dlm_restart_lock_mastery()
1283 memset(mle->response_map, 0, sizeof(mle->response_map)); in dlm_restart_lock_mastery()
1284 /* reset the vote_map to the current node_map */ in dlm_restart_lock_mastery()
1285 memcpy(mle->vote_map, mle->node_map, in dlm_restart_lock_mastery()
1286 sizeof(mle->node_map)); in dlm_restart_lock_mastery()
1288 if (mle->type != DLM_MLE_BLOCK) in dlm_restart_lock_mastery()
1289 set_bit(dlm->node_num, mle->maybe_map); in dlm_restart_lock_mastery()
1291 ret = -EAGAIN; in dlm_restart_lock_mastery()
1302 * -errno on a network error
1311 struct dlm_ctxt *dlm = mle->dlm; in dlm_do_master_request()
1316 request.node_idx = dlm->node_num; in dlm_do_master_request()
1318 BUG_ON(mle->type == DLM_MLE_MIGRATION); in dlm_do_master_request()
1320 request.namelen = (u8)mle->mnamelen; in dlm_do_master_request()
1321 memcpy(request.name, mle->mname, request.namelen); in dlm_do_master_request()
1324 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, in dlm_do_master_request()
1327 if (ret == -ESRCH) { in dlm_do_master_request()
1331 } else if (ret == -EINVAL) { in dlm_do_master_request()
1334 } else if (ret == -ENOMEM) { in dlm_do_master_request()
1354 spin_lock(&mle->spinlock); in dlm_do_master_request()
1357 set_bit(to, mle->response_map); in dlm_do_master_request()
1360 "reference\n", dlm->name, res->lockname.len, in dlm_do_master_request()
1361 res->lockname.name, to); in dlm_do_master_request()
1362 mle->master = to; in dlm_do_master_request()
1366 set_bit(to, mle->response_map); in dlm_do_master_request()
1370 set_bit(to, mle->response_map); in dlm_do_master_request()
1371 set_bit(to, mle->maybe_map); in dlm_do_master_request()
1382 spin_unlock(&mle->spinlock); in dlm_do_master_request()
1395 * dlm->spinlock
1396 * res->spinlock
1397 * mle->spinlock
1398 * dlm->master_list
1408 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; in dlm_master_request_handler()
1425 name = request->name; in dlm_master_request_handler()
1426 namelen = request->namelen; in dlm_master_request_handler()
1435 spin_lock(&dlm->spinlock); in dlm_master_request_handler()
1438 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1441 spin_lock(&res->spinlock); in dlm_master_request_handler()
1448 if (hlist_unhashed(&res->hash_node)) { in dlm_master_request_handler()
1449 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1454 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_master_request_handler()
1456 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1465 if (res->owner == dlm->node_num) { in dlm_master_request_handler()
1466 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); in dlm_master_request_handler()
1467 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1480 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1481 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1482 // mlog(0, "node %u is the master\n", res->owner); in dlm_master_request_handler()
1492 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_master_request_handler()
1494 "in-progress!\n"); in dlm_master_request_handler()
1499 spin_lock(&dlm->master_lock); in dlm_master_request_handler()
1506 spin_lock(&tmpmle->spinlock); in dlm_master_request_handler()
1507 if (tmpmle->type == DLM_MLE_BLOCK) { in dlm_master_request_handler()
1511 } else if (tmpmle->type == DLM_MLE_MIGRATION) { in dlm_master_request_handler()
1513 "node %u.\n", tmpmle->master, tmpmle->new_master); in dlm_master_request_handler()
1514 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1517 tmpmle->new_master); in dlm_master_request_handler()
1523 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_master_request_handler()
1525 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1532 request->node_idx); in dlm_master_request_handler()
1541 set_bit(request->node_idx, tmpmle->maybe_map); in dlm_master_request_handler()
1542 spin_unlock(&tmpmle->spinlock); in dlm_master_request_handler()
1544 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1545 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1560 spin_lock(&dlm->master_lock); in dlm_master_request_handler()
1566 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1567 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1572 mlog_errno(-ENOMEM); in dlm_master_request_handler()
1578 // mlog(0, "this is second time thru, already allocated, " in dlm_master_request_handler()
1581 set_bit(request->node_idx, mle->maybe_map); in dlm_master_request_handler()
1585 spin_lock(&tmpmle->spinlock); in dlm_master_request_handler()
1586 if (tmpmle->master == dlm->node_num) { in dlm_master_request_handler()
1590 if (tmpmle->type == DLM_MLE_BLOCK) in dlm_master_request_handler()
1592 else if (tmpmle->type == DLM_MLE_MIGRATION) { in dlm_master_request_handler()
1593 mlog(0, "migration mle was found (%u->%u)\n", in dlm_master_request_handler()
1594 tmpmle->master, tmpmle->new_master); in dlm_master_request_handler()
1599 set_bit(request->node_idx, tmpmle->maybe_map); in dlm_master_request_handler()
1600 spin_unlock(&tmpmle->spinlock); in dlm_master_request_handler()
1602 spin_unlock(&dlm->master_lock); in dlm_master_request_handler()
1603 spin_unlock(&dlm->spinlock); in dlm_master_request_handler()
1618 dlm->node_num, res->lockname.len, res->lockname.name); in dlm_master_request_handler()
1619 spin_lock(&res->spinlock); in dlm_master_request_handler()
1620 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, in dlm_master_request_handler()
1623 mlog(ML_ERROR, "failed to dispatch assert master work\n"); in dlm_master_request_handler()
1625 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1630 spin_unlock(&res->spinlock); in dlm_master_request_handler()
1650 * and re-assert across the cluster...
1656 struct dlm_assert_master assert; in dlm_do_assert_master() local
1661 const char *lockname = res->lockname.name; in dlm_do_assert_master()
1662 unsigned int namelen = res->lockname.len; in dlm_do_assert_master()
1666 spin_lock(&res->spinlock); in dlm_do_assert_master()
1667 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1668 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1679 mlog(0, "sending assert master to %d (%.*s)\n", to, in dlm_do_assert_master()
1681 memset(&assert, 0, sizeof(assert)); in dlm_do_assert_master()
1682 assert.node_idx = dlm->node_num; in dlm_do_assert_master()
1683 assert.namelen = namelen; in dlm_do_assert_master()
1684 memcpy(assert.name, lockname, namelen); in dlm_do_assert_master()
1685 assert.flags = cpu_to_be32(flags); in dlm_do_assert_master()
1687 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, in dlm_do_assert_master()
1688 &assert, sizeof(assert), to, &r); in dlm_do_assert_master()
1692 DLM_ASSERT_MASTER_MSG, dlm->key, to); in dlm_do_assert_master()
1704 mlog(ML_ERROR,"during assert master of %.*s to %u, " in dlm_do_assert_master()
1706 spin_lock(&dlm->spinlock); in dlm_do_assert_master()
1707 spin_lock(&dlm->master_lock); in dlm_do_assert_master()
1713 spin_unlock(&dlm->master_lock); in dlm_do_assert_master()
1714 spin_unlock(&dlm->spinlock); in dlm_do_assert_master()
1727 "nodes and requests a re-assert\n", in dlm_do_assert_master()
1735 spin_lock(&res->spinlock); in dlm_do_assert_master()
1737 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1744 spin_lock(&res->spinlock); in dlm_do_assert_master()
1745 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_do_assert_master()
1746 spin_unlock(&res->spinlock); in dlm_do_assert_master()
1747 wake_up(&res->wq); in dlm_do_assert_master()
1754 * dlm->spinlock
1755 * res->spinlock
1756 * mle->spinlock
1757 * dlm->master_list
1766 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; in dlm_assert_master_handler() local
1777 name = assert->name; in dlm_assert_master_handler()
1778 namelen = assert->namelen; in dlm_assert_master_handler()
1780 flags = be32_to_cpu(assert->flags); in dlm_assert_master_handler()
1787 spin_lock(&dlm->spinlock); in dlm_assert_master_handler()
1793 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
1795 /* not an error, could be master just re-asserting */ in dlm_assert_master_handler()
1797 "MLE for it! (%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1800 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_assert_master_handler()
1803 * could be master just re-asserting. */ in dlm_assert_master_handler()
1805 "is asserting! (%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1807 } else if (bit != assert->node_idx) { in dlm_assert_master_handler()
1810 "back off\n", assert->node_idx, bit); in dlm_assert_master_handler()
1819 assert->node_idx, namelen, name, bit, in dlm_assert_master_handler()
1820 assert->node_idx); in dlm_assert_master_handler()
1823 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1825 mlog(0, "%s:%.*s: got cleanup assert" in dlm_assert_master_handler()
1827 dlm->name, namelen, name, in dlm_assert_master_handler()
1828 assert->node_idx); in dlm_assert_master_handler()
1830 mlog(0, "%s:%.*s: got unrelated assert" in dlm_assert_master_handler()
1832 dlm->name, namelen, name, in dlm_assert_master_handler()
1833 assert->node_idx); in dlm_assert_master_handler()
1835 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1836 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
1841 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1847 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1848 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_assert_master_handler()
1850 "RECOVERING!\n", assert->node_idx, namelen, name); in dlm_assert_master_handler()
1854 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && in dlm_assert_master_handler()
1855 res->owner != assert->node_idx) { in dlm_assert_master_handler()
1856 mlog(ML_ERROR, "DIE! Mastery assert from %u, " in dlm_assert_master_handler()
1858 assert->node_idx, res->owner, namelen, in dlm_assert_master_handler()
1863 } else if (mle->type != DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1864 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { in dlm_assert_master_handler()
1865 /* owner is just re-asserting */ in dlm_assert_master_handler()
1866 if (res->owner == assert->node_idx) { in dlm_assert_master_handler()
1867 mlog(0, "owner %u re-asserting on " in dlm_assert_master_handler()
1868 "lock %.*s\n", assert->node_idx, in dlm_assert_master_handler()
1874 "(%.*s)\n", assert->node_idx, in dlm_assert_master_handler()
1875 res->owner, namelen, name); in dlm_assert_master_handler()
1878 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { in dlm_assert_master_handler()
1879 mlog(ML_ERROR, "got assert from %u, but lock " in dlm_assert_master_handler()
1881 "in-progress! (%.*s)\n", in dlm_assert_master_handler()
1882 assert->node_idx, in dlm_assert_master_handler()
1886 } else /* mle->type == DLM_MLE_MIGRATION */ { in dlm_assert_master_handler()
1887 /* should only be getting an assert from new master */ in dlm_assert_master_handler()
1888 if (assert->node_idx != mle->new_master) { in dlm_assert_master_handler()
1889 mlog(ML_ERROR, "got assert from %u, but " in dlm_assert_master_handler()
1892 assert->node_idx, mle->new_master, in dlm_assert_master_handler()
1893 mle->master, namelen, name); in dlm_assert_master_handler()
1899 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1903 // assert->node_idx); in dlm_assert_master_handler()
1906 int nn = -1; in dlm_assert_master_handler()
1909 spin_lock(&mle->spinlock); in dlm_assert_master_handler()
1910 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) in dlm_assert_master_handler()
1914 * then the calling node needs to re-assert to clear in dlm_assert_master_handler()
1916 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, in dlm_assert_master_handler()
1918 if (nn != dlm->node_num && nn != assert->node_idx) { in dlm_assert_master_handler()
1924 mle->master = assert->node_idx; in dlm_assert_master_handler()
1925 atomic_set(&mle->woken, 1); in dlm_assert_master_handler()
1926 wake_up(&mle->wq); in dlm_assert_master_handler()
1927 spin_unlock(&mle->spinlock); in dlm_assert_master_handler()
1931 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1932 if (mle->type == DLM_MLE_MIGRATION) { in dlm_assert_master_handler()
1935 res->lockname.len, res->lockname.name, in dlm_assert_master_handler()
1936 dlm->node_num, mle->new_master); in dlm_assert_master_handler()
1937 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_assert_master_handler()
1939 dlm_change_lockres_owner(dlm, res, mle->new_master); in dlm_assert_master_handler()
1940 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); in dlm_assert_master_handler()
1942 dlm_change_lockres_owner(dlm, res, mle->master); in dlm_assert_master_handler()
1944 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
1947 wake_up(&res->wq); in dlm_assert_master_handler()
1953 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
1955 rr = kref_read(&mle->mle_refs); in dlm_assert_master_handler()
1956 if (mle->inuse > 0) { in dlm_assert_master_handler()
1968 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " in dlm_assert_master_handler()
1970 "inuse=%d\n", dlm->name, namelen, name, in dlm_assert_master_handler()
1971 assert->node_idx, rr, extra_ref, mle->inuse); in dlm_assert_master_handler()
1978 /* the assert master message now balances the extra in dlm_assert_master_handler()
1984 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
1986 if (res->owner != assert->node_idx) { in dlm_assert_master_handler()
1988 "owner is %u (%.*s), no mle\n", assert->node_idx, in dlm_assert_master_handler()
1989 res->owner, namelen, name); in dlm_assert_master_handler()
1992 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
1997 spin_lock(&res->spinlock); in dlm_assert_master_handler()
1998 res->state |= DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_handler()
1999 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2008 mlog(ML_ERROR, "strange, got assert from %u, MASTER " in dlm_assert_master_handler()
2010 assert->node_idx, dlm->name, namelen, name); in dlm_assert_master_handler()
2016 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", in dlm_assert_master_handler()
2017 dlm->name, namelen, name, assert->node_idx); in dlm_assert_master_handler()
2026 spin_unlock(&res->spinlock); in dlm_assert_master_handler()
2027 spin_lock(&dlm->master_lock); in dlm_assert_master_handler()
2030 spin_unlock(&dlm->master_lock); in dlm_assert_master_handler()
2031 spin_unlock(&dlm->spinlock); in dlm_assert_master_handler()
2034 return -EINVAL; in dlm_assert_master_handler()
2042 spin_lock(&res->spinlock); in dlm_assert_master_post_handler()
2043 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; in dlm_assert_master_post_handler()
2044 spin_unlock(&res->spinlock); in dlm_assert_master_post_handler()
2045 wake_up(&res->wq); in dlm_assert_master_post_handler()
2058 return -ENOMEM; in dlm_dispatch_assert_master()
2063 item->u.am.lockres = res; /* already have a ref */ in dlm_dispatch_assert_master()
2065 item->u.am.ignore_higher = ignore_higher; in dlm_dispatch_assert_master()
2066 item->u.am.request_from = request_from; in dlm_dispatch_assert_master()
2067 item->u.am.flags = flags; in dlm_dispatch_assert_master()
2070 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, in dlm_dispatch_assert_master()
2071 res->lockname.name); in dlm_dispatch_assert_master()
2073 spin_lock(&dlm->work_lock); in dlm_dispatch_assert_master()
2074 list_add_tail(&item->list, &dlm->work_list); in dlm_dispatch_assert_master()
2075 spin_unlock(&dlm->work_lock); in dlm_dispatch_assert_master()
2077 queue_work(dlm->dlm_worker, &dlm->dispatched_work); in dlm_dispatch_assert_master()
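
dlm_dispatch_assert_master() packages the request as a work item, chains it on a private list under dlm->work_lock, and kicks a single work_struct on the dlm_worker workqueue whose handler later drains the list. A sketch of that private-queue-plus-workqueue shape; struct and field names are illustrative:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ctx {
	spinlock_t work_lock;
	struct list_head work_list;
	struct workqueue_struct *worker;
	struct work_struct dispatched_work;	/* its handler drains work_list */
};

struct work_item {
	struct list_head list;
	int payload;
};

static int dispatch(struct ctx *c, int payload)
{
	/* GFP_ATOMIC because the caller may hold a spinlock, as the
	 * assert-master dispatch above is called under res->spinlock */
	struct work_item *item = kzalloc(sizeof(*item), GFP_ATOMIC);

	if (!item)
		return -ENOMEM;
	item->payload = payload;

	spin_lock(&c->work_lock);
	list_add_tail(&item->list, &c->work_list);
	spin_unlock(&c->work_lock);

	/* kick the single drainer; queue_work() is a no-op if
	 * dispatched_work is already pending */
	queue_work(c->worker, &c->dispatched_work);
	return 0;
}
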
2092 dlm = item->dlm; in dlm_assert_master_worker()
2093 res = item->u.am.lockres; in dlm_assert_master_worker()
2094 ignore_higher = item->u.am.ignore_higher; in dlm_assert_master_worker()
2095 request_from = item->u.am.request_from; in dlm_assert_master_worker()
2096 flags = item->u.am.flags; in dlm_assert_master_worker()
2098 spin_lock(&dlm->spinlock); in dlm_assert_master_worker()
2099 memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); in dlm_assert_master_worker()
2100 spin_unlock(&dlm->spinlock); in dlm_assert_master_worker()
2102 clear_bit(dlm->node_num, nodemap); in dlm_assert_master_worker()
2108 bit = dlm->node_num; in dlm_assert_master_worker()
2120	 * longer allowed to assert our own mastery. OTOH, we need to in dlm_assert_master_worker()
2124 spin_lock(&res->spinlock); in dlm_assert_master_worker()
2125 if (res->state & DLM_LOCK_RES_MIGRATING) { in dlm_assert_master_worker()
2126 mlog(0, "Someone asked us to assert mastery, but we're " in dlm_assert_master_worker()
2127 "in the middle of migration. Skipping assert, " in dlm_assert_master_worker()
2129 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2133 spin_unlock(&res->spinlock); in dlm_assert_master_worker()
2138 res->lockname.len, res->lockname.name, dlm->node_num); in dlm_assert_master_worker()
2159 * lockres because this lockres is used to kick off recovery! ;-)
2160 * So, do a pre-check on all living nodes to see if any of those nodes
2162 * we wait a short time to allow that node to get notified by its own
2175 spin_lock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2176 dlm_node_iter_init(dlm->domain_map, &iter); in dlm_pre_master_reco_lockres()
2177 spin_unlock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2181 if (nodenum == dlm->node_num) in dlm_pre_master_reco_lockres()
2195 spin_lock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2196 if (test_bit(master, dlm->recovery_map)) { in dlm_pre_master_reco_lockres()
2200 "lock. must wait.\n", dlm->name, in dlm_pre_master_reco_lockres()
2202 ret = -EAGAIN; in dlm_pre_master_reco_lockres()
2204 spin_unlock(&dlm->spinlock); in dlm_pre_master_reco_lockres()
2205 mlog(0, "%s: reco lock master is %u\n", dlm->name, in dlm_pre_master_reco_lockres()
2224 lockname = res->lockname.name; in dlm_drop_lockres_ref()
2225 namelen = res->lockname.len; in dlm_drop_lockres_ref()
2229 deref.node_idx = dlm->node_num; in dlm_drop_lockres_ref()
2233 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, in dlm_drop_lockres_ref()
2234 &deref, sizeof(deref), res->owner, &r); in dlm_drop_lockres_ref()
2237 dlm->name, namelen, lockname, ret, res->owner); in dlm_drop_lockres_ref()
2241 dlm->name, namelen, lockname, res->owner, r); in dlm_drop_lockres_ref()
2243 if (r == -ENOMEM) in dlm_drop_lockres_ref()
2255 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; in dlm_deref_lockres_handler()
2259 int ret = -EINVAL; in dlm_deref_lockres_handler()
2269 name = deref->name; in dlm_deref_lockres_handler()
2270 namelen = deref->namelen; in dlm_deref_lockres_handler()
2271 node = deref->node_idx; in dlm_deref_lockres_handler()
2277 if (deref->node_idx >= O2NM_MAX_NODES) { in dlm_deref_lockres_handler()
2284 spin_lock(&dlm->spinlock); in dlm_deref_lockres_handler()
2287 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_handler()
2289 dlm->name, namelen, name); in dlm_deref_lockres_handler()
2292 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_handler()
2294 spin_lock(&res->spinlock); in dlm_deref_lockres_handler()
2295 if (res->state & DLM_LOCK_RES_SETREF_INPROG) in dlm_deref_lockres_handler()
2298 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_handler()
2299 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_handler()
2304 spin_unlock(&res->spinlock); in dlm_deref_lockres_handler()
2311 "but it is already dropped!\n", dlm->name, in dlm_deref_lockres_handler()
2312 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_handler()
2321 ret = -ENOMEM; in dlm_deref_lockres_handler()
2327 item->u.dl.deref_res = res; in dlm_deref_lockres_handler()
2328 item->u.dl.deref_node = node; in dlm_deref_lockres_handler()
2330 spin_lock(&dlm->work_lock); in dlm_deref_lockres_handler()
2331 list_add_tail(&item->list, &dlm->work_list); in dlm_deref_lockres_handler()
2332 spin_unlock(&dlm->work_lock); in dlm_deref_lockres_handler()
2334 queue_work(dlm->dlm_worker, &dlm->dispatched_work); in dlm_deref_lockres_handler()
2350 = (struct dlm_deref_lockres_done *)msg->buf; in dlm_deref_lockres_done_handler()
2354 int ret = -EINVAL; in dlm_deref_lockres_done_handler()
2361 name = deref->name; in dlm_deref_lockres_done_handler()
2362 namelen = deref->namelen; in dlm_deref_lockres_done_handler()
2363 node = deref->node_idx; in dlm_deref_lockres_done_handler()
2369 if (deref->node_idx >= O2NM_MAX_NODES) { in dlm_deref_lockres_done_handler()
2376 spin_lock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2379 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2381 dlm->name, namelen, name); in dlm_deref_lockres_done_handler()
2385 spin_lock(&res->spinlock); in dlm_deref_lockres_done_handler()
2386 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { in dlm_deref_lockres_done_handler()
2387 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2388 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2390 "but it is already derefed!\n", dlm->name, in dlm_deref_lockres_done_handler()
2391 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_done_handler()
2397 spin_unlock(&res->spinlock); in dlm_deref_lockres_done_handler()
2398 wake_up(&res->wq); in dlm_deref_lockres_done_handler()
2400 spin_unlock(&dlm->spinlock); in dlm_deref_lockres_done_handler()
2418 lockname = res->lockname.name; in dlm_drop_lockres_ref_done()
2419 namelen = res->lockname.len; in dlm_drop_lockres_ref_done()
2423 deref.node_idx = dlm->node_num; in dlm_drop_lockres_ref_done()
2427 ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key, in dlm_drop_lockres_ref_done()
2431 " to node %u\n", dlm->name, namelen, in dlm_drop_lockres_ref_done()
2436 dlm->name, namelen, lockname, node, r); in dlm_drop_lockres_ref_done()
2448 dlm = item->dlm; in dlm_deref_lockres_worker()
2449 res = item->u.dl.deref_res; in dlm_deref_lockres_worker()
2450 node = item->u.dl.deref_node; in dlm_deref_lockres_worker()
2452 spin_lock(&res->spinlock); in dlm_deref_lockres_worker()
2453 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); in dlm_deref_lockres_worker()
2455 if (test_bit(node, res->refmap)) { in dlm_deref_lockres_worker()
2459 spin_unlock(&res->spinlock); in dlm_deref_lockres_worker()
2465 dlm->name, res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2469 "but it is already dropped!\n", dlm->name, in dlm_deref_lockres_worker()
2470 res->lockname.len, res->lockname.name, node); in dlm_deref_lockres_worker()
2481 * 3. one or more non-local locks, or, one or more references
2493 assert_spin_locked(&res->spinlock); in dlm_is_lockres_migratable()
2496 if (res->state & DLM_LOCK_RES_MIGRATING) in dlm_is_lockres_migratable()
2500 if (res->state & (DLM_LOCK_RES_RECOVERING| in dlm_is_lockres_migratable()
2504 if (res->owner != dlm->node_num) in dlm_is_lockres_migratable()
2510 if (lock->ml.node != dlm->node_num) { in dlm_is_lockres_migratable()
2514 cookie = be64_to_cpu(lock->ml.cookie); in dlm_is_lockres_migratable()
2516 "%s list\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2517 res->lockname.name, in dlm_is_lockres_migratable()
2526 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); in dlm_is_lockres_migratable()
2531 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, in dlm_is_lockres_migratable()
2532 res->lockname.name); in dlm_is_lockres_migratable()
2555 return -EINVAL; in dlm_migrate_lockres()
2557 name = res->lockname.name; in dlm_migrate_lockres()
2558 namelen = res->lockname.len; in dlm_migrate_lockres()
2560 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, in dlm_migrate_lockres()
2564 ret = -ENOMEM; in dlm_migrate_lockres()
2582 spin_lock(&dlm->spinlock); in dlm_migrate_lockres()
2583 spin_lock(&dlm->master_lock); in dlm_migrate_lockres()
2585 namelen, target, dlm->node_num); in dlm_migrate_lockres()
2590 if (ret != -EEXIST) in dlm_migrate_lockres()
2593 spin_unlock(&dlm->master_lock); in dlm_migrate_lockres()
2594 spin_unlock(&dlm->spinlock); in dlm_migrate_lockres()
2596 if (ret == -EEXIST) { in dlm_migrate_lockres()
2604 * if we fail after this we need to re-dirty the lockres in dlm_migrate_lockres()
2608 "the target went down.\n", res->lockname.len, in dlm_migrate_lockres()
2609 res->lockname.name, target); in dlm_migrate_lockres()
2610 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2611 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2613 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2614 ret = -EINVAL; in dlm_migrate_lockres()
2618 if (ret != -EEXIST && oldmle) { in dlm_migrate_lockres()
2644 flush_workqueue(dlm->dlm_worker); in dlm_migrate_lockres()
2660 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2661 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2663 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2673 * node now waits for target to do an assert master. this node in dlm_migrate_lockres()
2681 /* wait for new node to assert master */ in dlm_migrate_lockres()
2683 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres()
2684 (atomic_read(&mle->woken) == 1), in dlm_migrate_lockres()
2688 if (atomic_read(&mle->woken) == 1 || in dlm_migrate_lockres()
2689 res->owner == target) in dlm_migrate_lockres()
2693 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2699 dlm->name, res->lockname.len, in dlm_migrate_lockres()
2700 res->lockname.name, target); in dlm_migrate_lockres()
2701 ret = -EINVAL; in dlm_migrate_lockres()
2706 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2707 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2709 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2714 dlm->name, res->lockname.len, res->lockname.name); in dlm_migrate_lockres()
2718 spin_lock(&res->spinlock); in dlm_migrate_lockres()
2720 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_migrate_lockres()
2722 spin_unlock(&res->spinlock); in dlm_migrate_lockres()
2723 wake_up(&res->wq); in dlm_migrate_lockres()
2733 /* re-dirty the lockres if we failed */ in dlm_migrate_lockres()
2740 wake_up(&res->wq); in dlm_migrate_lockres()
2747 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, in dlm_migrate_lockres()
2758 * will re-acquire before exit.
2760 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2763 __must_hold(&dlm->spinlock) in dlm_empty_lockres()
2769 assert_spin_locked(&dlm->spinlock); in dlm_empty_lockres()
2771 spin_lock(&res->spinlock); in dlm_empty_lockres()
2774 spin_unlock(&res->spinlock); in dlm_empty_lockres()
2780 spin_unlock(&dlm->spinlock); in dlm_empty_lockres()
2785 dlm->name, res->lockname.len, res->lockname.name, in dlm_empty_lockres()
2787 spin_lock(&dlm->spinlock); in dlm_empty_lockres()
2795 spin_lock(&dlm->ast_lock); in dlm_lock_basts_flushed()
2796 spin_lock(&lock->spinlock); in dlm_lock_basts_flushed()
2797 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); in dlm_lock_basts_flushed()
2798 spin_unlock(&lock->spinlock); in dlm_lock_basts_flushed()
2799 spin_unlock(&dlm->ast_lock); in dlm_lock_basts_flushed()
2808 spin_lock(&res->spinlock); in dlm_migration_can_proceed()
2809 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); in dlm_migration_can_proceed()
2810 spin_unlock(&res->spinlock); in dlm_migration_can_proceed()
2814 spin_lock(&dlm->spinlock); in dlm_migration_can_proceed()
2815 if (!test_bit(mig_target, dlm->domain_map)) in dlm_migration_can_proceed()
2817 spin_unlock(&dlm->spinlock); in dlm_migration_can_proceed()
2825 spin_lock(&res->spinlock); in dlm_lockres_is_dirty()
2826 ret = !!(res->state & DLM_LOCK_RES_DIRTY); in dlm_lockres_is_dirty()
2827 spin_unlock(&res->spinlock); in dlm_lockres_is_dirty()
2839 res->lockname.len, res->lockname.name, dlm->node_num, in dlm_mark_lockres_migrating()
2843 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2844 BUG_ON(res->migration_pending); in dlm_mark_lockres_migrating()
2845 res->migration_pending = 1; in dlm_mark_lockres_migrating()
2849 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2855 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2856 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); in dlm_mark_lockres_migrating()
2857 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2858 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2860 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); in dlm_mark_lockres_migrating()
2864 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); in dlm_mark_lockres_migrating()
2869 ret = wait_event_interruptible_timeout(dlm->migration_wq, in dlm_mark_lockres_migrating()
2874 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2875 test_bit(target, dlm->domain_map) ? "no":"yes"); in dlm_mark_lockres_migrating()
2878 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", in dlm_mark_lockres_migrating()
2879 test_bit(target, dlm->domain_map) ? "no":"yes"); in dlm_mark_lockres_migrating()
2888 spin_lock(&dlm->spinlock); in dlm_mark_lockres_migrating()
2889 if (!test_bit(target, dlm->domain_map)) { in dlm_mark_lockres_migrating()
2892 ret = -EHOSTDOWN; in dlm_mark_lockres_migrating()
2894 spin_unlock(&dlm->spinlock); in dlm_mark_lockres_migrating()
2901 spin_lock(&res->spinlock); in dlm_mark_lockres_migrating()
2902 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); in dlm_mark_lockres_migrating()
2903 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; in dlm_mark_lockres_migrating()
2905 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); in dlm_mark_lockres_migrating()
2907 res->migration_pending = 0; in dlm_mark_lockres_migrating()
2908 spin_unlock(&res->spinlock); in dlm_mark_lockres_migrating()
2927 struct list_head *queue = &res->granted; in dlm_remove_nonlocal_locks()
2931 assert_spin_locked(&res->spinlock); in dlm_remove_nonlocal_locks()
2933 BUG_ON(res->owner == dlm->node_num); in dlm_remove_nonlocal_locks()
2937 if (lock->ml.node != dlm->node_num) { in dlm_remove_nonlocal_locks()
2939 lock->ml.node); in dlm_remove_nonlocal_locks()
2941 BUG_ON(!list_empty(&lock->ast_list)); in dlm_remove_nonlocal_locks()
2942 BUG_ON(!list_empty(&lock->bast_list)); in dlm_remove_nonlocal_locks()
2943 BUG_ON(lock->ast_pending); in dlm_remove_nonlocal_locks()
2944 BUG_ON(lock->bast_pending); in dlm_remove_nonlocal_locks()
2946 lock->ml.node); in dlm_remove_nonlocal_locks()
2947 list_del_init(&lock->list); in dlm_remove_nonlocal_locks()
2958 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); in dlm_remove_nonlocal_locks()
2963 if (bit != dlm->node_num) { in dlm_remove_nonlocal_locks()
2965 "migrating lockres, clearing\n", dlm->name, in dlm_remove_nonlocal_locks()
2966 res->lockname.len, res->lockname.name, bit); in dlm_remove_nonlocal_locks()
2982 struct list_head *queue = &res->granted; in dlm_pick_migration_target()
2987 assert_spin_locked(&dlm->spinlock); in dlm_pick_migration_target()
2988 assert_spin_locked(&res->spinlock); in dlm_pick_migration_target()
2994 if (lock->ml.node == dlm->node_num) in dlm_pick_migration_target()
2996 if (test_bit(lock->ml.node, dlm->exit_domain_map)) in dlm_pick_migration_target()
2998 nodenum = lock->ml.node; in dlm_pick_migration_target()
3004 noderef = -1; in dlm_pick_migration_target()
3006 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, in dlm_pick_migration_target()
3010 if (noderef == dlm->node_num) in dlm_pick_migration_target()
3012 if (test_bit(noderef, dlm->exit_domain_map)) in dlm_pick_migration_target()
3034 migrate.namelen = res->lockname.len; in dlm_do_migrate_request()
3035 memcpy(migrate.name, res->lockname.name, migrate.namelen); in dlm_do_migrate_request()
3048 spin_lock(&dlm->spinlock); in dlm_do_migrate_request()
3049 skip = (!test_bit(nodenum, dlm->domain_map)); in dlm_do_migrate_request()
3050 spin_unlock(&dlm->spinlock); in dlm_do_migrate_request()
3052 clear_bit(nodenum, iter->node_map); in dlm_do_migrate_request()
3056 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, in dlm_do_migrate_request()
3061 "MIGRATE_REQUEST to node %u\n", dlm->name, in dlm_do_migrate_request()
3067 clear_bit(nodenum, iter->node_map); in dlm_do_migrate_request()
3074 /* during the migration request we short-circuited in dlm_do_migrate_request()
3078 dlm->name, res->lockname.len, res->lockname.name, in dlm_do_migrate_request()
3080 spin_lock(&res->spinlock); in dlm_do_migrate_request()
3082 spin_unlock(&res->spinlock); in dlm_do_migrate_request()
3106 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; in dlm_migrate_request_handler()
3115 name = migrate->name; in dlm_migrate_request_handler()
3116 namelen = migrate->namelen; in dlm_migrate_request_handler()
3123 ret = -ENOMEM; in dlm_migrate_request_handler()
3127 /* check for pre-existing lock */ in dlm_migrate_request_handler()
3128 spin_lock(&dlm->spinlock); in dlm_migrate_request_handler()
3131 spin_lock(&res->spinlock); in dlm_migrate_request_handler()
3132 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_migrate_request_handler()
3136 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3140 ret = -EINVAL; /* need a better solution */ in dlm_migrate_request_handler()
3143 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_migrate_request_handler()
3144 spin_unlock(&res->spinlock); in dlm_migrate_request_handler()
3147 spin_lock(&dlm->master_lock); in dlm_migrate_request_handler()
3151 migrate->new_master, in dlm_migrate_request_handler()
3152 migrate->master); in dlm_migrate_request_handler()
3157 spin_unlock(&dlm->master_lock); in dlm_migrate_request_handler()
3159 spin_unlock(&dlm->spinlock); in dlm_migrate_request_handler()
3174 /* must be holding dlm->spinlock and dlm->master_lock
3193 assert_spin_locked(&dlm->spinlock); in dlm_add_migration_mle()
3194 assert_spin_locked(&dlm->master_lock); in dlm_add_migration_mle()
3200 spin_lock(&tmp->spinlock); in dlm_add_migration_mle()
3201 if (tmp->type == DLM_MLE_MIGRATION) { in dlm_add_migration_mle()
3202 if (master == dlm->node_num) { in dlm_add_migration_mle()
3207 spin_unlock(&tmp->spinlock); in dlm_add_migration_mle()
3208 return -EEXIST; in dlm_add_migration_mle()
3215 tmp->master, tmp->new_master, in dlm_add_migration_mle()
3222 tmp->master = master; in dlm_add_migration_mle()
3223 atomic_set(&tmp->woken, 1); in dlm_add_migration_mle()
3224 wake_up(&tmp->wq); in dlm_add_migration_mle()
3228 if (tmp->type == DLM_MLE_MASTER) { in dlm_add_migration_mle()
3233 "migration\n", dlm->name, in dlm_add_migration_mle()
3238 spin_unlock(&tmp->spinlock); in dlm_add_migration_mle()
3243 mle->new_master = new_master; in dlm_add_migration_mle()
3244 /* the new master will be sending an assert master for this. in dlm_add_migration_mle()
3246 mle->master = master; in dlm_add_migration_mle()
3248 set_bit(new_master, mle->maybe_map); in dlm_add_migration_mle()
3263 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, in dlm_reset_mleres_owner()
3264 mle->mnamehash); in dlm_reset_mleres_owner()
3266 spin_unlock(&dlm->master_lock); in dlm_reset_mleres_owner()
3269 spin_lock(&res->spinlock); in dlm_reset_mleres_owner()
3272 spin_unlock(&res->spinlock); in dlm_reset_mleres_owner()
3279 spin_lock(&dlm->master_lock); in dlm_reset_mleres_owner()
3281 spin_unlock(&dlm->master_lock); in dlm_reset_mleres_owner()
3292 spin_lock(&mle->spinlock); in dlm_clean_migration_mle()
3294 atomic_set(&mle->woken, 1); in dlm_clean_migration_mle()
3295 spin_unlock(&mle->spinlock); in dlm_clean_migration_mle()
3297 wake_up(&mle->wq); in dlm_clean_migration_mle()
3305 BUG_ON(mle->type != DLM_MLE_BLOCK); in dlm_clean_block_mle()
3307 spin_lock(&mle->spinlock); in dlm_clean_block_mle()
3308 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); in dlm_clean_block_mle()
3312 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3319 atomic_set(&mle->woken, 1); in dlm_clean_block_mle()
3320 spin_unlock(&mle->spinlock); in dlm_clean_block_mle()
3321 wake_up(&mle->wq); in dlm_clean_block_mle()
3337 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); in dlm_clean_master_list()
3339 assert_spin_locked(&dlm->spinlock); in dlm_clean_master_list()
3342 spin_lock(&dlm->master_lock); in dlm_clean_master_list()
3346 BUG_ON(mle->type != DLM_MLE_BLOCK && in dlm_clean_master_list()
3347 mle->type != DLM_MLE_MASTER && in dlm_clean_master_list()
3348 mle->type != DLM_MLE_MIGRATION); in dlm_clean_master_list()
3353 if (mle->type == DLM_MLE_MASTER) in dlm_clean_master_list()
3359 if (mle->type == DLM_MLE_BLOCK) { in dlm_clean_master_list()
3375 if (mle->master != dead_node && in dlm_clean_master_list()
3376 mle->new_master != dead_node) in dlm_clean_master_list()
3379 if (mle->new_master == dead_node && mle->inuse) { in dlm_clean_master_list()
3383 dlm->name, dead_node, in dlm_clean_master_list()
3384 mle->master); in dlm_clean_master_list()
3393 "%u to %u!\n", dlm->name, dead_node, mle->master, in dlm_clean_master_list()
3394 mle->new_master); in dlm_clean_master_list()
3410 spin_unlock(&dlm->master_lock); in dlm_clean_master_list()
3419 spin_lock(&dlm->spinlock); in dlm_finish_migration()
3420 dlm_node_iter_init(dlm->domain_map, &iter); in dlm_finish_migration()
3422 clear_bit(dlm->node_num, iter.node_map); in dlm_finish_migration()
3423 spin_unlock(&dlm->spinlock); in dlm_finish_migration()
3428 spin_lock(&res->spinlock); in dlm_finish_migration()
3430 spin_unlock(&res->spinlock); in dlm_finish_migration()
3432 mlog(0, "now time to do a migrate request to other nodes\n"); in dlm_finish_migration()
3434 dlm->node_num, &iter); in dlm_finish_migration()
3440 mlog(0, "doing assert master of %.*s to all except the original node\n", in dlm_finish_migration()
3441 res->lockname.len, res->lockname.name); in dlm_finish_migration()
3454 mlog(0, "doing assert master of %.*s back to %u\n", in dlm_finish_migration()
3455 res->lockname.len, res->lockname.name, old_master); in dlm_finish_migration()
3459 mlog(0, "assert master to original master failed " in dlm_finish_migration()
3467 spin_lock(&res->spinlock); in dlm_finish_migration()
3468 dlm_set_lockres_owner(dlm, res, dlm->node_num); in dlm_finish_migration()
3469 res->state &= ~DLM_LOCK_RES_MIGRATING; in dlm_finish_migration()
3470 spin_unlock(&res->spinlock); in dlm_finish_migration()
3471 /* re-dirty it on the new master */ in dlm_finish_migration()
3473 wake_up(&res->wq); in dlm_finish_migration()
3483 /* for future intent to call an ast, reserve one ahead of time.
3489 assert_spin_locked(&res->spinlock); in __dlm_lockres_reserve_ast()
3490 if (res->state & DLM_LOCK_RES_MIGRATING) { in __dlm_lockres_reserve_ast()
3493 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in __dlm_lockres_reserve_ast()
3495 atomic_inc(&res->asts_reserved); in __dlm_lockres_reserve_ast()
3514 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) in dlm_lockres_release_ast()
3517 if (!res->migration_pending) { in dlm_lockres_release_ast()
3518 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3522 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); in dlm_lockres_release_ast()
3523 res->migration_pending = 0; in dlm_lockres_release_ast()
3524 res->state |= DLM_LOCK_RES_MIGRATING; in dlm_lockres_release_ast()
3525 spin_unlock(&res->spinlock); in dlm_lockres_release_ast()
3526 wake_up(&res->wq); in dlm_lockres_release_ast()
3527 wake_up(&dlm->migration_wq); in dlm_lockres_release_ast()
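
dlm_lockres_release_ast() relies on atomic_dec_and_lock(): the counter is decremented lock-free, and only the release that takes it to zero acquires res->spinlock, so exactly one path handles the deferred migration state. A sketch with illustrative names:

#include <linux/atomic.h>
#include <linux/spinlock.h>

struct reserved {
	atomic_t count;
	spinlock_t lock;
	int pending;	/* action deferred until the final release */
};

static void release_one(struct reserved *r)
{
	/* returns false, without the lock, unless this decrement took
	 * count to zero; on zero it returns with the lock held */
	if (!atomic_dec_and_lock(&r->count, &r->lock))
		return;

	if (r->pending) {
		r->pending = 0;
		/* exactly one releaser runs the deferred action here */
	}
	spin_unlock(&r->lock);
}
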
3543 spin_lock(&dlm->spinlock); in dlm_force_free_mles()
3544 spin_lock(&dlm->master_lock); in dlm_force_free_mles()
3546 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); in dlm_force_free_mles()
3547 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); in dlm_force_free_mles()
3552 if (mle->type != DLM_MLE_BLOCK) { in dlm_force_free_mles()
3556 atomic_set(&mle->woken, 1); in dlm_force_free_mles()
3557 wake_up(&mle->wq); in dlm_force_free_mles()
3564 spin_unlock(&dlm->master_lock); in dlm_force_free_mles()
3565 spin_unlock(&dlm->spinlock); in dlm_force_free_mles()