/*
 * Excerpts from fs/gfs2/lock_dlm.c: gfs2's interface to the dlm, including
 * the dlm/gfs2 recovery coordination code.  Elided lines are marked "...".
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */
...
#include "recovery.h"
...
/**
 * gfs2_update_stats - Update time-based stats
 */
	s64 delta = sample - s->stats[index];

	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
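/*
 * Illustration (not part of the upstream file).  The pair of updates in
 * gfs2_update_stats() is the classic SRTT/RTTVAR-style estimator: the
 * smoothed mean moves by delta/8 and the smoothed deviation moves a
 * quarter of the way toward |delta|.  Written out for a single pair,
 * with a hypothetical helper name and starting values:
 *
 *	srtt = 1000, sample = 1200  =>  delta = 200,
 *	srtt becomes 1000 + 200/8 = 1025.
 */
static inline void srtt_example_update(s64 *srtt, s64 *srttvar, s64 sample)
{
	s64 delta = sample - *srtt;

	*srtt += delta >> 3;				/* mean: gain 1/8 */
	*srttvar += (abs(delta) - *srttvar) >> 2;	/* deviation: gain 1/4 */
}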
/**
 * gfs2_update_reply_times - Update locking statistics
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * ... requests where the TRY or
 * TRY_1CB flags are set are classified as non-blocking.  All
 * other DLM requests are counted as (potentially) blocking.
 */
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	...
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
/**
 * gfs2_update_request_times - Update locking statistics
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm; it is updated immediately before
 * each dlm call.
 */
	const unsigned gltype = gl->gl_name.ln_type;
	...
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
/* from gdlm_ast(): */
	unsigned ret = gl->gl_state;
	...
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		if (gl->gl_ops->go_free)
			gl->gl_ops->go_free(gl);
		...
	case -DLM_ECANCEL: /* Cancel while getting lock */
		...
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		...
	case -ETIMEDOUT: /* Canceled due to timeout */
		...
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		...
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	...
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	...
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
	}
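/*
 * Sketch (not a verbatim quote).  Only the switch skeleton of gdlm_bast()
 * matched above; in the upstream function each DLM mode a remote node is
 * requesting maps back to a glock state before calling gfs2_glock_cb().
 * Treat the exact case list below as a paraphrase of that code:
 */
	switch (mode) {
	case DLM_LOCK_EX:		/* remote wants exclusive */
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:		/* concurrent write -> "deferred" */
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:		/* protected read -> "shared" */
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
	}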
/* convert gfs lock-state to dlm lock-mode */

/* from make_mode(): */
	...
	return -1;
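/*
 * Sketch (not a verbatim quote).  Only make_mode()'s failure return
 * matched above.  The mapping it implements pairs each glock state with
 * a dlm lock mode along these lines, paraphrasing the upstream switch:
 */
static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;	/* null */
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;	/* exclusive */
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;	/* concurrent write */
	case LM_ST_SHARED:
		return DLM_LOCK_PR;	/* protected read */
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	return -1;
}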
/* from make_flags(): */
	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;
	...
	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}
/* from gfs2_reverse_hex(): */
		*c-- = hex_asc[value & 0x0f];
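/*
 * Sketch (assumed shape, following the upstream helper).  The matching
 * line sits in a tiny routine that emits hex digits starting from the
 * least significant nibble, walking the buffer backwards:
 */
static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';			/* at least one digit for value == 0 */
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}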
/* from gdlm_lock(): */
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	...
	req = make_mode(gl->gl_name.ln_sbd, req_state);
	...
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	...
	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
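/*
 * Illustration (hypothetical values).  The offsets 7 and 23 above imply
 * an 8-character type field followed by a 16-character number field,
 * 24 characters total, each right-aligned hex padded with spaces.  For
 * a glock of type 2 with lock number 0x1234 the resource name becomes:
 *
 *	"       2            1234"
 *	  type (8)   number (16)
 *
 * Every node computes the same string, so the same glock maps to the
 * same dlm resource name cluster-wide.
 */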
/* from gdlm_put_lock(): */
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	...
	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
		...
	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    ...)
		...
	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
	}
/* from gdlm_cancel(): */
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  ...
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  ...
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  ...
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 *  ...
 *
 * - failures during recovery
 *   ...
 *
 * - more specific gfs2 steps in sequence above
 *   ...
 *
 * - parallel recovery steps across all nodes
 *   ...
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *   ...
 *   require recovery, because the mount in step 4 could not have
 *   ...
 *   and returning.  The mount in step 4 waits until the recovery in
 *   ...
 *
 * - special case of first mounter: first node to mount the fs
 *   ...
 *   and recover any that need recovery before other nodes are allowed
 *   ...
 *   The mounted_lock is demoted to PR when first recovery is done, so
 *   ...
 *   mounter is doing first mount recovery of all journals.
 *   A mounting node needs to acquire control_lock in EX mode before
 *   ...
 *   the first mount recovery, blocking mounts from other nodes, then demotes
 *   ...
 *
 *   first mounter:
 *   ...
 *   do first mounter recovery
 *   mounted_lock EX->PR
 *   control_lock EX->NL, write lvb generation
 *
 *   other mounter:
 *   ...
 *   control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 *   mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 *   ...
 *   control_lock EX->NL
 *   ...
 *
 * - mount during recovery
 *
 *   If a node mounts while others are doing recovery (not first mounter),
 *   ...
 *
 * - control_lock lvb format
 *   The lvb holds a 32-bit generation number at the start (the rest of
 *   the first 8 bytes is unused), followed by
 *   GDLM_LVB_SIZE-8 bytes of jid bit map.  If bit N is set, it indicates
 *   that jid N needs recovery.
 */
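/*
 * Sketch of the lvb layout described above (JID_BITMAP_OFFSET is assumed
 * to be 8, matching the GDLM_LVB_SIZE-8 arithmetic in the comment):
 *
 *	byte 0         4         8                    GDLM_LVB_SIZE
 *	+--------------+---------+--------------------+
 *	| __le32 gen   | unused  | jid bitmap         |
 *	+--------------+---------+--------------------+
 *
 * A hypothetical helper for the bitmap test, not in the file itself:
 */
static inline bool lvb_jid_needs_recovery(const char *lvb_bits, int jid)
{
	return test_bit_le(jid, lvb_bits + JID_BITMAP_OFFSET);
}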
/* from control_lvb_read(): */
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	...

/* from control_lvb_write(): */
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));

/* from all_jid_bits_clear(): */
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
/* from sync_wait_cb(): */
	complete(&ls->ls_sync_wait);

/* from sync_unlock(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error)
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
	...
	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error)
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
	...
	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN)
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	...
static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}
/**
 * remote_withdraw - react to a node withdrawing from the file system
 */
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
			continue;
		...
	}
/* from gfs2_control_func(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
		remote_withdraw(sdp);
		clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
		return;
	}

	spin_lock(&ls->ls_recover_spin);
	/*
	 * ...
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done().
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * ...
	 * dlm recovery is in progress and dlm locking is blocked.
	 * ...
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 * ...
	 * the journal recovery is SUCCESS
	 */
	...
	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return;
	}

	recover_size = ls->ls_recover_size;

	/*
	 * ...
	 * in succession.  Only the first will really do recovery,
	 * ...
	 * recovery.  So, another node may have already recovered
	 * ...
	 */
	for (i = 0; i < recover_size; i++) {
		if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
			continue;

		ls->ls_recover_result[i] = 0;

		if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
			continue;

		__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
		...
	}

	for (i = 0; i < recover_size; i++) {
		if (!ls->ls_recover_submit[i])
			continue;
		if (ls->ls_recover_submit[i] < lvb_gen)
			ls->ls_recover_submit[i] = 0;
	}

	for (i = 0; i < recover_size; i++) {
		if (!ls->ls_recover_submit[i])
			continue;
		if (ls->ls_recover_submit[i] < start_gen) {
			ls->ls_recover_submit[i] = 0;
			__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
		}
	}
	...
	spin_unlock(&ls->ls_recover_spin);
	...
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
	...
	/*
	 * ...
	 * and clear a jid bit in the lvb if the recovery is a success.
	 */
	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			...
		}
	}

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * ...
	 */
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		...
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
/* from control_mount(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	...
		error = -EINTR;
	...
	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * ...
	 */
	...
	if (error == -EAGAIN) {
		...
	}
	...
	/*
	 * ...
	 * we cannot do the first-mount responsibility it implies: recovery.
	 */
	if (sdp->sd_args.ar_spectator)
		...
	...
	} else if (error != -EAGAIN) {
		...
	}
	...
	/* not even -EAGAIN should happen here */
	...
	/*
	 * ...
	 * lvb_gen will be non-zero.
	 */
	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
	...
		error = -EINVAL;
	...
	/* first mounter, keep both EX while doing first recovery */
	spin_lock(&ls->ls_recover_spin);
	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	return 0;
	...
	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;
	...
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, ...,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	...
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, ...,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	...
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, ...,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	...
	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;
/* from control_first_done(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery.  We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		...
		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
/*
 * ... (dlm slots start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

/* from set_recover_size(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
	...
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
/* from free_recover_size(): */
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
/* dlm calls before it does lock recovery */

/* from gdlm_recover_prep(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
/* from gdlm_recover_slot(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;
	...
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
/* dlm calls after recover_slot and after it completes lock recovery */

/* from gdlm_recover_done(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
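/*
 * For context: the three callbacks above are handed to the dlm when the
 * lockspace is created.  The upstream file wires them up in a
 * dlm_lockspace_ops table along these lines:
 */
static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};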
/* gfs2_recover thread has a journal recovery result */

/* from gdlm_recovery_result(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	...
	ls->ls_recover_result[jid] = result;
	...
	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
/* from gdlm_mount(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;
	...
		error = -EINVAL;
	...
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	...
	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	...
	set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
	...
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		...
		error = -EINVAL;
		goto fail_release;
	}
	...
	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	...
fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
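/*
 * Illustration (hypothetical values).  The table string passed to
 * gdlm_mount() has the form "cluster:fsname".  With table =
 * "mycluster:myfs", fsname points at the ':' when the length arithmetic
 * runs, so strlen(table) - strlen(fsname) = 14 - 5 = 9 bytes are copied
 * and cluster becomes "mycluster"; fsname is then advanced past the ':'
 * before being handed to dlm_new_lockspace().
 */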
/* from gdlm_first_done(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	...
	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;
	...

/* from gdlm_unmount(): */
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}
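/*
 * Sketch.  The functions excerpted above are exposed to the rest of gfs2
 * through the lm_lockops table; the member list below is a subset and
 * may differ between kernel versions:
 */
const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name		= "lock_dlm",
	.lm_mount		= gdlm_mount,
	.lm_first_done		= gdlm_first_done,
	.lm_recovery_result	= gdlm_recovery_result,
	.lm_unmount		= gdlm_unmount,
	.lm_put_lock		= gdlm_put_lock,
	.lm_lock		= gdlm_lock,
	.lm_cancel		= gdlm_cancel,
};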