Lines Matching +full:t +full:- +full:head

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
51 * Verify the log-relative block number and length in basic blocks are valid for
61 if (blk_no < 0 || blk_no >= log->l_logBBsize) in xlog_verify_bno()
63 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) in xlog_verify_bno()
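
The two range checks above are self-contained arithmetic, so they can be restated as a tiny userspace sketch. Here verify_bno and the plain C types are hypothetical stand-ins for the kernel's xlog_verify_bno and xfs_daddr_t:

#include <stdbool.h>
#include <stdio.h>

/* Restatement of the xlog_verify_bno checks: a log-relative block
 * number and a length in basic blocks must both sit inside the log.
 * log_bbsize is the log length in basic blocks (l_logBBsize). */
static bool verify_bno(long long blk_no, int bbcount, long long log_bbsize)
{
	if (blk_no < 0 || blk_no >= log_bbsize)
		return false;
	if (bbcount <= 0 || blk_no + bbcount > log_bbsize)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", verify_bno(10, 5, 100));	/* 1: fits */
	printf("%d\n", verify_bno(98, 5, 100));	/* 0: runs past the end */
	return 0;
}
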
78 * Pass log block 0 since we don't have an addr yet, buffer will be in xlog_alloc_buffer()
81 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) { in xlog_alloc_buffer()
82 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", in xlog_alloc_buffer()
88 * We do log I/O in units of log sectors (a power-of-2 multiple of the in xlog_alloc_buffer()
92 * In addition, the buffer may be used for a non-sector-aligned block in xlog_alloc_buffer()
95 * block it will never straddle a sector boundary, so this won't be an in xlog_alloc_buffer()
101 if (nbblks > 1 && log->l_sectBBsize > 1) in xlog_alloc_buffer()
102 nbblks += log->l_sectBBsize; in xlog_alloc_buffer()
103 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_alloc_buffer()
109 * in a log buffer. The buffer covers a log sector-aligned region.
116 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1)); in xlog_align()
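
The sizing rule in xlog_alloc_buffer and the offset rule in xlog_align are pure arithmetic and can be sketched outside the kernel. In this illustrative sketch, round_up_pow2, demo_alloc_len and demo_align are made-up names; the kernel's round_up requires a power-of-2 multiple, which l_sectBBsize is guaranteed to be:

#include <stdio.h>

#define BBSHIFT		9		/* 512-byte basic blocks, as in XFS */
#define BBTOB(bbs)	((long long)(bbs) << BBSHIFT)

/* round_up for power-of-2 multiples, mirroring the kernel helper. */
static int round_up_pow2(int x, int mult)
{
	return (x + mult - 1) & ~(mult - 1);
}

/* Sizing rule from xlog_alloc_buffer: pad multi-block reads by one
 * sector so a non-sector-aligned start block still fits, then round
 * the total up to a whole number of sectors. */
static int demo_alloc_len(int nbblks, int sect_bbsize)
{
	if (nbblks > 1 && sect_bbsize > 1)
		nbblks += sect_bbsize;
	return round_up_pow2(nbblks, sect_bbsize);
}

/* Offset rule from xlog_align: byte offset of blk_no within its sector. */
static long long demo_align(long long blk_no, int sect_bbsize)
{
	return BBTOB(blk_no & (sect_bbsize - 1));
}

int main(void)
{
	/* 2 KiB sectors (4 basic blocks): read 5 blocks starting at block 6. */
	printf("alloc %d BBs\n", demo_alloc_len(5, 4));	/* 5 + 4 -> 12 */
	printf("offset %lld bytes\n", demo_align(6, 4)); /* 2 BBs = 1024 bytes in */
	return 0;
}
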
129 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) { in xlog_do_io()
130 xfs_warn(log->l_mp, in xlog_do_io()
133 return -EFSCORRUPTED; in xlog_do_io()
136 blk_no = round_down(blk_no, log->l_sectBBsize); in xlog_do_io()
137 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_do_io()
140 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no, in xlog_do_io()
143 xfs_alert(log->l_mp, in xlog_do_io()
194 xlog_rec_header_t *head) in xlog_header_check_dump() argument
197 __func__, &mp->m_sb.sb_uuid, XLOG_FMT); in xlog_header_check_dump()
199 &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); in xlog_header_check_dump()
202 #define xlog_header_check_dump(mp, head) argument
211 xlog_rec_header_t *head) in xlog_header_check_recover() argument
213 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); in xlog_header_check_recover()
216 * IRIX doesn't write the h_fmt field and leaves it zeroed in xlog_header_check_recover()
220 if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) { in xlog_header_check_recover()
222 "dirty log written in incompatible format - can't recover"); in xlog_header_check_recover()
223 xlog_header_check_dump(mp, head); in xlog_header_check_recover()
224 return -EFSCORRUPTED; in xlog_header_check_recover()
226 if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid, in xlog_header_check_recover()
227 &head->h_fs_uuid))) { in xlog_header_check_recover()
229 "dirty log entry has mismatched uuid - can't recover"); in xlog_header_check_recover()
230 xlog_header_check_dump(mp, head); in xlog_header_check_recover()
231 return -EFSCORRUPTED; in xlog_header_check_recover()
237 * read the head block of the log and check the header
242 xlog_rec_header_t *head) in xlog_header_check_mount() argument
244 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); in xlog_header_check_mount()
246 if (uuid_is_null(&head->h_fs_uuid)) { in xlog_header_check_mount()
248 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If in xlog_header_check_mount()
252 xfs_warn(mp, "null uuid in log - IRIX style log"); in xlog_header_check_mount()
253 } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid, in xlog_header_check_mount()
254 &head->h_fs_uuid))) { in xlog_header_check_mount()
255 xfs_warn(mp, "log has mismatched uuid - can't recover"); in xlog_header_check_mount()
256 xlog_header_check_dump(mp, head); in xlog_header_check_mount()
257 return -EFSCORRUPTED; in xlog_header_check_mount()
296 (mid_blk == end_blk && mid_blk-1 == first_blk)); in xlog_find_cycle_start()
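
The assertion above is the termination condition of a binary search over on-disk cycle numbers. A minimal sketch of the idea, assuming the range contains exactly one cycle transition; the in-memory array stands in for reading cycle numbers with xlog_bread:

#include <stdio.h>

/* Narrow [first_blk, end_blk] down to the first block carrying the
 * last block's cycle number, halving the range each step much as
 * xlog_find_cycle_start does. */
static int find_cycle_start(const int *cycle, int first_blk, int end_blk)
{
	int want = cycle[end_blk];

	while (end_blk - first_blk > 1) {
		int mid = first_blk + (end_blk - first_blk) / 2;

		if (cycle[mid] == want)
			end_blk = mid;		/* transition at or before mid */
		else
			first_blk = mid;	/* transition after mid */
	}
	return end_blk;
}

int main(void)
{
	/*	      blk:  0  1  2  3  4  5  6  7 */
	int cycle[] =    { 9, 9, 9, 9, 8, 8, 8, 8 };

	printf("cycle starts at block %d\n", find_cycle_start(cycle, 0, 7)); /* 4 */
	return 0;
}
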
306 * found, or with -1 (an invalid block number) if there is no such
333 while (bufblks > log->l_logBBsize) in xlog_find_verify_cycle()
337 if (bufblks < log->l_sectBBsize) in xlog_find_verify_cycle()
338 return -ENOMEM; in xlog_find_verify_cycle()
344 bcount = min(bufblks, (start_blk + nbblks - i)); in xlog_find_verify_cycle()
361 *new_blk = -1; in xlog_find_verify_cycle()
371 if (xfs_has_logv2(log->l_mp)) { in xlog_logrec_hblks()
372 int h_size = be32_to_cpu(rh->h_size); in xlog_logrec_hblks()
374 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) && in xlog_logrec_hblks()
403 xlog_rec_header_t *head = NULL; in xlog_find_verify_log_record() local
406 int num_blks = *last_blk - start_blk; in xlog_find_verify_log_record()
415 return -ENOMEM; in xlog_find_verify_log_record()
421 offset += ((num_blks - 1) << BBSHIFT); in xlog_find_verify_log_record()
424 for (i = (*last_blk) - 1; i >= 0; i--) { in xlog_find_verify_log_record()
427 xfs_warn(log->l_mp, in xlog_find_verify_log_record()
428 "Log inconsistent (didn't find previous header)"); in xlog_find_verify_log_record()
430 error = -EFSCORRUPTED; in xlog_find_verify_log_record()
440 head = (xlog_rec_header_t *)offset; in xlog_find_verify_log_record()
442 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) in xlog_find_verify_log_record()
446 offset -= BBSIZE; in xlog_find_verify_log_record()
451 * to caller. If caller can handle a return of -1, then this routine in xlog_find_verify_log_record()
454 if (i == -1) { in xlog_find_verify_log_record()
461 * of the log record _before_ the head. So we check the uuid. in xlog_find_verify_log_record()
463 if ((error = xlog_header_check_mount(log->l_mp, head))) in xlog_find_verify_log_record()
469 * up reading an entire log record. In this case, we don't want to in xlog_find_verify_log_record()
473 xhdrs = xlog_logrec_hblks(log, head); in xlog_find_verify_log_record()
475 if (*last_blk - i + extra_bblks != in xlog_find_verify_log_record()
476 BTOBB(be32_to_cpu(head->h_len)) + xhdrs) in xlog_find_verify_log_record()
485 * Head is defined to be the point of the log where the next log write
487 * eliminated when calculating the head. We aren't guaranteed that previous
489 * current cycle number -1 won't be present in the log if we start writing
495 * Return: zero if normal, non-zero if error.
508 int error, log_bbnum = log->l_logBBsize; in xlog_find_head()
513 xfs_warn(log->l_mp, "empty log check failed"); in xlog_find_head()
521 /* Linux XFS shouldn't generate totally zeroed logs - in xlog_find_head()
525 xfs_warn(log->l_mp, "totally zeroed log"); in xlog_find_head()
534 return -ENOMEM; in xlog_find_head()
542 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ in xlog_find_head()
553 * case, head_blk can't be set to zero (which makes sense). The below in xlog_find_head()
554 * math doesn't work out properly with head_blk equal to zero. Instead, in xlog_find_head()
556 * value makes the math correct. If head_blk doesn't change through in xlog_find_head()
566 * containing last_half_cycle - 1. If we find such a hole, in xlog_find_head()
567 * then the start of that hole will be the new head. The in xlog_find_head()
569 * x | x ... | x - 1 | x in xlog_find_head()
572 * In this case the head really is somewhere at the end of the in xlog_find_head()
576 * x | x + 1 | x ... | x - 1 | x in xlog_find_head()
578 * the head has to end up at the start of the x-1 hole at the in xlog_find_head()
582 * end of the log and search for cycle numbers equal to x-1. in xlog_find_head()
583 * We don't worry about the x+1 blocks that we encounter, in xlog_find_head()
584 * because we know that they cannot be the head since the log in xlog_find_head()
588 stop_on_cycle = last_half_cycle - 1; in xlog_find_head()
596 * be where the new head belongs. First we do a binary search in xlog_find_head()
601 * the log, then we look for occurrences of last_half_cycle - 1 in xlog_find_head()
608 * <---------> less than scan distance in xlog_find_head()
609 * x + 1 ... | x ... | x - 1 | x in xlog_find_head()
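
A compressed illustration of the search just described: the kernel does a binary search plus verification passes, but the essence is locating the first block stamped with cycle x - 1, whose start becomes the head. The array below is a hypothetical stand-in for on-disk cycle numbers:

#include <stdio.h>

int main(void)
{
	/*	      blk:  0  1  2  3  4  5  6  7  8  9 */
	int cycle[] =    { 9, 9, 9, 9, 9, 8, 8, 8, 9, 9 };
	int nblocks = 10;
	int last_half_cycle = 9;	/* cycle found in the last block */
	int stop_on_cycle = last_half_cycle - 1;
	int head_blk = -1;

	for (int i = 0; i < nblocks; i++) {
		if (cycle[i] == stop_on_cycle) {
			head_blk = i;	/* start of the x - 1 hole */
			break;
		}
	}
	printf("head at block %d\n", head_blk);	/* 5 */
	return 0;
}
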
623 * in the in-core log. The following number can be made tighter if in xlog_find_head()
632 start_blk = head_blk - num_scan_bblks; in xlog_find_head()
637 if (new_blk != -1) in xlog_find_head()
644 * last_half_cycle - 1. in xlog_find_head()
646 * we've found a hole that didn't get written in going around in xlog_find_head()
648 * x + 1 ... | x ... | x - 1 | x in xlog_find_head()
649 * <---------> less than scan distance in xlog_find_head()
664 * certainly not the head of the log. By searching for in xlog_find_head()
665 * last_half_cycle-1 we accomplish that. in xlog_find_head()
669 start_blk = log_bbnum - (num_scan_bblks - head_blk); in xlog_find_head()
671 num_scan_bblks - (int)head_blk, in xlog_find_head()
672 (stop_on_cycle - 1), &new_blk))) in xlog_find_head()
674 if (new_blk != -1) { in xlog_find_head()
681 * log is good. This scan needs to verify that it doesn't find in xlog_find_head()
690 if (new_blk != -1) in xlog_find_head()
701 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ in xlog_find_head()
706 error = -EIO; in xlog_find_head()
717 start_blk = log_bbnum - (num_scan_bblks - head_blk); in xlog_find_head()
720 (xfs_daddr_t) log_bbnum-start_blk >= 0); in xlog_find_head()
725 error = -EIO; in xlog_find_head()
741 * means that during a previous crash, we didn't have a clean break in xlog_find_head()
742 * from cycle number N to cycle number N-1. In this case, we need in xlog_find_head()
743 * to find the first block with cycle number N-1. in xlog_find_head()
750 xfs_warn(log->l_mp, "failed to find log head"); in xlog_find_head()
782 * Walk backwards from the head block until we hit the tail or the first in xlog_rseek_logrec_hdr()
786 for (i = (int) head_blk - 1; i >= end_blk; i--) { in xlog_rseek_logrec_hdr()
800 * If we haven't hit the tail block or the log record header count, in xlog_rseek_logrec_hdr()
802 * callers can pass head == tail if the tail is not yet known. in xlog_rseek_logrec_hdr()
805 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) { in xlog_rseek_logrec_hdr()
830 * Given head and tail blocks, walk forward from the tail block until we find
831 * the provided number of records or hit the head block. The return value is the
856 * Walk forward from the tail block until we hit the head or the last in xlog_seek_logrec_hdr()
859 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1; in xlog_seek_logrec_hdr()
874 * If we haven't hit the head block or the log record header count, in xlog_seek_logrec_hdr()
901 * Calculate distance from head to tail (i.e., unused space in the log).
910 return tail_blk - head_blk; in xlog_tail_distance()
912 return tail_blk + (log->l_logBBsize - head_blk); in xlog_tail_distance()
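
xlog_tail_distance reduces to a two-case formula. Here it is as a standalone function, with one non-wrapped and one wrapped example:

#include <stdio.h>

/* Free space between head and tail; when the head has passed the
 * tail's block number, the distance wraps through the end of the log. */
static long long tail_distance(long long head_blk, long long tail_blk,
			       long long log_bbsize)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;
	return tail_blk + (log_bbsize - head_blk);
}

int main(void)
{
	printf("%lld\n", tail_distance(10, 40, 100));	/* 30: no wrap */
	printf("%lld\n", tail_distance(90, 20, 100));	/* 30: wraps the end */
	return 0;
}
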
917 * writes have been detected near the front of the log and the head has been
920 * We also have to handle the case where the tail was pinned and the head
927 * subsequent checkpoint didn't write successfully.
929 * Therefore, CRC check the log from tail to head. If a failure occurs and the
930 * offending record is within max iclog bufs from the head, walk the tail
951 return -ENOMEM; in xlog_verify_tail()
965 * Run a CRC check from the tail to the head. We can't just check in xlog_verify_tail()
967 * blocks cleared during the search for the head/tail. These blocks are in xlog_verify_tail()
968 * overwritten with zero-length records and thus record count is not a in xlog_verify_tail()
974 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { in xlog_verify_tail()
978 * Is corruption within range of the head? If so, retry from in xlog_verify_tail()
998 xfs_warn(log->l_mp, in xlog_verify_tail()
1007 * Detect and trim torn writes from the head of the log.
1011 * CRC verification. While we can't always be certain that CRC verification
1014 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1016 * policy. In the event of CRC failure, the head is walked back to the last good
1022 xfs_daddr_t *head_blk, /* in/out: unverified head */ in xlog_verify_head()
1038 * Check the head of the log for torn writes. Search backwards from the in xlog_verify_head()
1039 * head until we hit the tail or the maximum number of log record I/Os in xlog_verify_head()
1041 * we don't trash the rhead/buffer pointers from the caller. in xlog_verify_head()
1045 return -ENOMEM; in xlog_verify_head()
1055 * block found above to the current head. If a CRC failure occurs, the in xlog_verify_head()
1060 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { in xlog_verify_head()
1066 xfs_warn(log->l_mp, in xlog_verify_head()
1067 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.", in xlog_verify_head()
1074 * Note that xlog_find_tail() clears the blocks at the new head in xlog_verify_head()
1083 return -EIO; in xlog_verify_head()
1086 * Reset the head block to the starting block of the first bad in xlog_verify_head()
1090 * Bail out if the updated head/tail match as this indicates in xlog_verify_head()
1095 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn)); in xlog_verify_head()
1105 be32_to_cpu((*rhead)->h_size)); in xlog_verify_head()
1109 * We need to make sure we handle log wrapping properly, so we can't use the
1123 div_s64_rem(bno, log->l_logBBsize, &mod); in xlog_wrap_logbno()
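
A sketch of the same wrap in plain C. The kernel goes through div_s64_rem because 64-bit division needs a helper there; for non-negative block numbers the % operator expresses the same reduction into [0, l_logBBsize):

#include <stdio.h>

static long long wrap_logbno(long long bno, int log_bbsize)
{
	return bno % log_bbsize;	/* assumes bno >= 0 */
}

int main(void)
{
	int log_bbsize = 1024;

	/* A record near the end computes a next-block address past the
	 * physical end of the log; wrap it back around to the start. */
	printf("%lld\n", wrap_logbno(1020 + 10, log_bbsize));	/* 6 */
	return 0;
}
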
1128 * Check whether the head of the log points to an unmount record. In other
1129 * words, determine whether the log is clean. If so, update the in-core state
1157 * below. We don't want to clear the unmount record if there is one, so in xlog_check_unmount_rec()
1162 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len))); in xlog_check_unmount_rec()
1165 be32_to_cpu(rhead->h_num_logops) == 1) { in xlog_check_unmount_rec()
1172 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { in xlog_check_unmount_rec()
1178 xlog_assign_atomic_lsn(&log->l_tail_lsn, in xlog_check_unmount_rec()
1179 log->l_curr_cycle, after_umount_blk); in xlog_check_unmount_rec()
1180 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, in xlog_check_unmount_rec()
1181 log->l_curr_cycle, after_umount_blk); in xlog_check_unmount_rec()
1209 log->l_prev_block = rhead_blk; in xlog_set_state()
1210 log->l_curr_block = (int)head_blk; in xlog_set_state()
1211 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); in xlog_set_state()
1213 log->l_curr_cycle++; in xlog_set_state()
1214 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); in xlog_set_state()
1215 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); in xlog_set_state()
1216 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, in xlog_set_state()
1217 BBTOB(log->l_curr_block)); in xlog_set_state()
1218 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, in xlog_set_state()
1219 BBTOB(log->l_curr_block)); in xlog_set_state()
1262 return -ENOMEM; in xlog_find_tail()
1277 * block. This wraps all the way back around to the head so something is in xlog_find_tail()
1278 * seriously wrong if we can't find it. in xlog_find_tail()
1285 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); in xlog_find_tail()
1286 error = -EFSCORRUPTED; in xlog_find_tail()
1289 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); in xlog_find_tail()
1292 * Set the log state based on the current head record. in xlog_find_tail()
1295 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1298 * Look for an unmount record at the head of the log. This sets the log in xlog_find_tail()
1307 * Verify the log head if the log is not clean (e.g., we have anything in xlog_find_tail()
1308 * but an unmount record at the head). This uses CRC verification to in xlog_find_tail()
1310 * considered torn writes and the log head is trimmed accordingly. in xlog_find_tail()
1324 /* update in-core state again if the head changed */ in xlog_find_tail()
1328 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1340 * headers if we have a filesystem using non-persistent counters. in xlog_find_tail()
1343 set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate); in xlog_find_tail()
1346 * Make sure that there are no blocks in front of the head in xlog_find_tail()
1347 * with the same cycle number as the head. This can happen in xlog_find_tail()
1357 * However on Linux, we can & do recover a read-only filesystem. in xlog_find_tail()
1361 * But... if the -device- itself is readonly, just skip this. in xlog_find_tail()
1362 * We can't recover this device anyway, so it won't matter. in xlog_find_tail()
1364 if (!xfs_readonly_buftarg(log->l_targ)) in xlog_find_tail()
1371 xfs_warn(log->l_mp, "failed to locate log tail"); in xlog_find_tail()
1383 * of the first block with cycle number 0. It won't have a complete LR
1401 int error, log_bbnum = log->l_logBBsize; in xlog_find_zeroed()
1408 return -ENOMEM; in xlog_find_zeroed()
1421 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset); in xlog_find_zeroed()
1432 last_blk = log_bbnum-1; in xlog_find_zeroed()
1448 start_blk = last_blk - num_scan_bblks; in xlog_find_zeroed()
1452 * our current estimate of the head. What we're trying to detect is in xlog_find_zeroed()
1459 if (new_blk != -1) in xlog_find_zeroed()
1463 * Potentially backup over partial log record write. We don't need in xlog_find_zeroed()
1468 error = -EIO; in xlog_find_zeroed()
1497 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); in xlog_add_record()
1498 recp->h_cycle = cpu_to_be32(cycle); in xlog_add_record()
1499 recp->h_version = cpu_to_be32( in xlog_add_record()
1500 xfs_has_logv2(log->l_mp) ? 2 : 1); in xlog_add_record()
1501 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); in xlog_add_record()
1502 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); in xlog_add_record()
1503 recp->h_fmt = cpu_to_be32(XLOG_FMT); in xlog_add_record()
1504 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); in xlog_add_record()
1519 int sectbb = log->l_sectBBsize; in xlog_write_log_records()
1532 while (bufblks > log->l_logBBsize) in xlog_write_log_records()
1537 return -ENOMEM; in xlog_write_log_records()
1550 j = start_block - balign; in xlog_write_log_records()
1556 bcount = min(bufblks, end_block - start_block); in xlog_write_log_records()
1557 endcount = bcount - j; in xlog_write_log_records()
1566 buffer + BBTOB(ealign - start_block)); in xlog_write_log_records()
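
The chunking loop of xlog_write_log_records can be sketched without the I/O and without the sector alignment (the balign/ealign handling above). This illustrative loop just prints the ranges a full version would write:

#include <stdio.h>

int main(void)
{
	int log_bbsize = 16;
	int start_block = 2, end_block = 14;
	int bufblks = 40;		/* pretend initial buffer size */

	/* Halve the buffer until it fits inside the log, as above. */
	while (bufblks > log_bbsize)
		bufblks >>= 1;		/* 40 -> 20 -> 10 */

	for (int blk = start_block; blk < end_block; ) {
		int bcount = end_block - blk;

		if (bcount > bufblks)
			bcount = bufblks;	/* min(bufblks, remaining) */
		printf("write blocks [%d, %d)\n", blk, blk + bcount);
		blk += bcount;
	}
	return 0;
}
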
1592 * in front of the log head. We do this so that we won't become confused
1619 head_cycle = log->l_curr_cycle; in xlog_clear_stale_blocks()
1620 head_block = log->l_curr_block; in xlog_clear_stale_blocks()
1623 * Figure out the distance between the new head of the log in xlog_clear_stale_blocks()
1625 * head that we may have written just before the crash, but in xlog_clear_stale_blocks()
1626 * we don't want to overwrite the tail of the log. in xlog_clear_stale_blocks()
1630 * The tail is behind the head in the physical log, in xlog_clear_stale_blocks()
1631 * so the distance from the head to the tail is the in xlog_clear_stale_blocks()
1632 * distance from the head to the end of the log plus in xlog_clear_stale_blocks()
1636 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1638 head_block >= log->l_logBBsize)) in xlog_clear_stale_blocks()
1639 return -EFSCORRUPTED; in xlog_clear_stale_blocks()
1640 tail_distance = tail_block + (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
1643 * The head is behind the tail in the physical log, in xlog_clear_stale_blocks()
1644 * so the distance from the head to the tail is just in xlog_clear_stale_blocks()
1645 * the tail block minus the head block. in xlog_clear_stale_blocks()
1647 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1650 return -EFSCORRUPTED; in xlog_clear_stale_blocks()
1651 tail_distance = tail_block - head_block; in xlog_clear_stale_blocks()
1655 * If the head is right up against the tail, we can't clear in xlog_clear_stale_blocks()
1667 * We take the smaller so that we don't overwrite the tail and in xlog_clear_stale_blocks()
1668 * we don't waste all day writing from the head to the tail in xlog_clear_stale_blocks()
1673 if ((head_block + max_distance) <= log->l_logBBsize) { in xlog_clear_stale_blocks()
1679 * n ... | n - 1 ... in xlog_clear_stale_blocks()
1681 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1690 * I/Os. The first write should be from the head to the in xlog_clear_stale_blocks()
1694 distance = log->l_logBBsize - head_block; in xlog_clear_stale_blocks()
1695 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1706 * same cycle as the head so that we get: in xlog_clear_stale_blocks()
1707 * n ... n ... | n - 1 ... in xlog_clear_stale_blocks()
1710 distance = max_distance - (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
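
The one-write/two-write split is easy to demonstrate standalone. A sketch with made-up numbers that prints the writes instead of issuing them; as in the code above, the pre-wrap blocks get cycle n - 1 and the wrapped blocks get cycle n, producing the n ... n ... | n - 1 ... layout:

#include <stdio.h>

int main(void)
{
	int log_bbsize = 100;
	int head_block = 90, head_cycle = 7;
	int max_distance = 30;		/* capped by the tail distance */

	if (head_block + max_distance <= log_bbsize) {
		printf("one write: cycle %d, blocks [%d, %d)\n",
		       head_cycle - 1, head_block, head_block + max_distance);
	} else {
		int distance = log_bbsize - head_block;

		printf("write 1: cycle %d, blocks [%d, %d)\n",
		       head_cycle - 1, head_block, log_bbsize);
		distance = max_distance - distance;
		printf("write 2: cycle %d, blocks [0, %d)\n",
		       head_cycle, distance);
	}
	return 0;
}
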
1732 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_release_intent()
1733 struct xfs_log_item *lip = dfp->dfp_intent; in xlog_recover_release_intent()
1735 if (lip->li_type != intent_type) in xlog_recover_release_intent()
1737 if (!lip->li_ops->iop_match(lip, intent_id)) in xlog_recover_release_intent()
1742 xfs_defer_cancel_recovery(log->l_mp, dfp); in xlog_recover_release_intent()
1764 if (VFS_I(*ipp)->i_nlink == 0) in xlog_recover_iget()
1801 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type) in xlog_find_item_ops()
1820 * from the transaction. However, we can't do that until after we've
1838 * But there's a problem with that - we can't tell an inode allocation buffer
1839 * apart from a regular buffer, so we can't separate them. We can, however,
1843 * Hence, 4 lists, in order from head to tail:
1844 * - buffer_list for all buffers except cancelled/inode unlink buffers
1845 * - item_list for all non-buffer items
1846 * - inode_buffer_list for inode unlink buffers
1847 * - cancel_list for the cancelled buffers
1849 * Note that we add objects to the tail of the lists so that first-to-last
1850 * ordering is preserved within the lists. Adding objects to the head of the
1851 * list means when we traverse from the head we walk them in last-to-first
1852 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
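
A minimal sketch of that four-way sort. Real items are list entries moved between list_heads; here an array of class tags stands in, and every class keeps first-to-last order (the kernel actually prepends cancelled buffers, which is fine because their order doesn't matter):

#include <stdio.h>

enum fate {			/* illustrative names, not the kernel's */
	BUFFER_LIST,		/* buffers except cancelled/inode unlink */
	ITEM_LIST,		/* all non-buffer items */
	INODE_BUFFER_LIST,
	CANCEL_LIST,
	NCLASSES
};

int main(void)
{
	const char *names[] = { "buffer", "item", "inode-buffer", "cancel" };
	/* A pretend transaction: item index -> classification. */
	enum fate items[] = { ITEM_LIST, BUFFER_LIST, CANCEL_LIST,
			      ITEM_LIST, INODE_BUFFER_LIST, BUFFER_LIST };
	int n = sizeof(items) / sizeof(items[0]);

	/* Replay the classes in the recovery-critical head-to-tail order. */
	for (int c = 0; c < NCLASSES; c++)
		for (int i = 0; i < n; i++)
			if (items[i] == (enum fate)c)
				printf("replay item %d from %s list\n",
				       i, names[c]);
	return 0;
}
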
1870 list_splice_init(&trans->r_itemq, &sort_list); in xlog_recover_reorder_trans()
1874 item->ri_ops = xlog_find_item_ops(item); in xlog_recover_reorder_trans()
1875 if (!item->ri_ops) { in xlog_recover_reorder_trans()
1876 xfs_warn(log->l_mp, in xlog_recover_reorder_trans()
1885 list_splice_init(&sort_list, &trans->r_itemq); in xlog_recover_reorder_trans()
1886 error = -EFSCORRUPTED; in xlog_recover_reorder_trans()
1890 if (item->ri_ops->reorder) in xlog_recover_reorder_trans()
1891 fate = item->ri_ops->reorder(item); in xlog_recover_reorder_trans()
1895 list_move_tail(&item->ri_list, &buffer_list); in xlog_recover_reorder_trans()
1900 list_move(&item->ri_list, &cancel_list); in xlog_recover_reorder_trans()
1903 list_move(&item->ri_list, &inode_buffer_list); in xlog_recover_reorder_trans()
1908 list_move_tail(&item->ri_list, &item_list); in xlog_recover_reorder_trans()
1915 list_splice(&buffer_list, &trans->r_itemq); in xlog_recover_reorder_trans()
1917 list_splice_tail(&item_list, &trans->r_itemq); in xlog_recover_reorder_trans()
1919 list_splice_tail(&inode_buffer_list, &trans->r_itemq); in xlog_recover_reorder_trans()
1921 list_splice_tail(&cancel_list, &trans->r_itemq); in xlog_recover_reorder_trans()
1933 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); in xlog_buf_readahead()
1949 xfs_defer_start_recovery(lip, &log->r_dfops, ops); in xlog_recover_intent_item()
1955 xfs_trans_ail_insert(log->l_ailp, lip, lsn); in xlog_recover_intent_item()
1956 lip->li_ops->iop_unpin(lip, 0); in xlog_recover_intent_item()
1973 if (item->ri_ops->commit_pass2) in xlog_recover_items_pass2()
1974 error = item->ri_ops->commit_pass2(log, buffer_list, in xlog_recover_items_pass2()
1975 item, trans->r_lsn); in xlog_recover_items_pass2()
2005 hlist_del_init(&trans->r_list); in xlog_recover_commit_trans()
2011 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) { in xlog_recover_commit_trans()
2016 if (item->ri_ops->commit_pass1) in xlog_recover_commit_trans()
2017 error = item->ri_ops->commit_pass1(log, item); in xlog_recover_commit_trans()
2020 if (item->ri_ops->ra_pass2) in xlog_recover_commit_trans()
2021 item->ri_ops->ra_pass2(log, item); in xlog_recover_commit_trans()
2022 list_move_tail(&item->ri_list, &ra_list); in xlog_recover_commit_trans()
2049 list_splice_init(&done_list, &trans->r_itemq); in xlog_recover_commit_trans()
2056 struct list_head *head) in xlog_recover_add_item() argument
2061 INIT_LIST_HEAD(&item->ri_list); in xlog_recover_add_item()
2062 list_add_tail(&item->ri_list, head); in xlog_recover_add_item()
2080 if (list_empty(&trans->r_itemq)) { in xlog_recover_add_to_cont_trans()
2083 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_cont_trans()
2084 return -EFSCORRUPTED; in xlog_recover_add_to_cont_trans()
2087 xlog_recover_add_item(&trans->r_itemq); in xlog_recover_add_to_cont_trans()
2088 ptr = (char *)&trans->r_theader + in xlog_recover_add_to_cont_trans()
2089 sizeof(struct xfs_trans_header) - len; in xlog_recover_add_to_cont_trans()
2095 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, in xlog_recover_add_to_cont_trans()
2098 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; in xlog_recover_add_to_cont_trans()
2099 old_len = item->ri_buf[item->ri_cnt-1].i_len; in xlog_recover_add_to_cont_trans()
2103 return -ENOMEM; in xlog_recover_add_to_cont_trans()
2105 item->ri_buf[item->ri_cnt-1].i_len += len; in xlog_recover_add_to_cont_trans()
2106 item->ri_buf[item->ri_cnt-1].i_addr = ptr; in xlog_recover_add_to_cont_trans()
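
The grow-and-append step above (reallocating the last region and copying the continuation onto its tail) maps directly onto userspace realloc. A self-contained sketch with made-up payload fragments:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char first[] = "half a log it";	/* first fragment */
	const char cont[] = "em payload";	/* continuation fragment */
	size_t old_len = sizeof(first) - 1;
	size_t len = sizeof(cont) - 1;
	char *ptr, *grown;

	ptr = malloc(old_len);
	if (!ptr)
		return 1;
	memcpy(ptr, first, old_len);

	/* Grow the region and copy the continuation onto its tail. */
	grown = realloc(ptr, old_len + len + 1);
	if (!grown) {
		free(ptr);
		return 1;		/* -ENOMEM in the kernel */
	}
	memcpy(grown + old_len, cont, len);
	grown[old_len + len] = '\0';

	printf("%s\n", grown);		/* "half a log item payload" */
	free(grown);
	return 0;
}
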
2137 if (list_empty(&trans->r_itemq)) { in xlog_recover_add_to_trans()
2140 xfs_warn(log->l_mp, "%s: bad header magic number", in xlog_recover_add_to_trans()
2143 return -EFSCORRUPTED; in xlog_recover_add_to_trans()
2147 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_trans()
2149 return -EFSCORRUPTED; in xlog_recover_add_to_trans()
2154 * records. If we don't have the whole thing here, copy what we in xlog_recover_add_to_trans()
2158 xlog_recover_add_item(&trans->r_itemq); in xlog_recover_add_to_trans()
2159 memcpy(&trans->r_theader, dp, len); in xlog_recover_add_to_trans()
2168 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, in xlog_recover_add_to_trans()
2170 if (item->ri_total != 0 && in xlog_recover_add_to_trans()
2171 item->ri_total == item->ri_cnt) { in xlog_recover_add_to_trans()
2173 xlog_recover_add_item(&trans->r_itemq); in xlog_recover_add_to_trans()
2174 item = list_entry(trans->r_itemq.prev, in xlog_recover_add_to_trans()
2178 if (item->ri_total == 0) { /* first region to be added */ in xlog_recover_add_to_trans()
2179 if (in_f->ilf_size == 0 || in xlog_recover_add_to_trans()
2180 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { in xlog_recover_add_to_trans()
2181 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2183 in_f->ilf_size); in xlog_recover_add_to_trans()
2186 return -EFSCORRUPTED; in xlog_recover_add_to_trans()
2189 item->ri_total = in_f->ilf_size; in xlog_recover_add_to_trans()
2190 item->ri_buf = in xlog_recover_add_to_trans()
2191 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), in xlog_recover_add_to_trans()
2195 if (item->ri_total <= item->ri_cnt) { in xlog_recover_add_to_trans()
2196 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2198 item->ri_cnt, item->ri_total); in xlog_recover_add_to_trans()
2201 return -EFSCORRUPTED; in xlog_recover_add_to_trans()
2205 item->ri_buf[item->ri_cnt].i_addr = ptr; in xlog_recover_add_to_trans()
2206 item->ri_buf[item->ri_cnt].i_len = len; in xlog_recover_add_to_trans()
2207 item->ri_cnt++; in xlog_recover_add_to_trans()
2224 hlist_del_init(&trans->r_list); in xlog_recover_free_trans()
2226 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { in xlog_recover_free_trans()
2228 list_del(&item->ri_list); in xlog_recover_free_trans()
2229 for (i = 0; i < item->ri_cnt; i++) in xlog_recover_free_trans()
2230 kmem_free(item->ri_buf[i].i_addr); in xlog_recover_free_trans()
2232 kmem_free(item->ri_buf); in xlog_recover_free_trans()
2283 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); in xlog_recovery_process_trans()
2288 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); in xlog_recovery_process_trans()
2290 error = -EFSCORRUPTED; in xlog_recovery_process_trans()
2300 * current ophdr. If the transaction doesn't exist and the start flag is set in
2302 * Either way, return what we found during the lookup - an existing transaction
2315 tid = be32_to_cpu(ohead->oh_tid); in xlog_recover_ophdr_to_trans()
2318 if (trans->r_log_tid == tid) in xlog_recover_ophdr_to_trans()
2323 * skip over non-start transaction headers - we could be in xlog_recover_ophdr_to_trans()
2326 if (!(ohead->oh_flags & XLOG_START_TRANS)) in xlog_recover_ophdr_to_trans()
2329 ASSERT(be32_to_cpu(ohead->oh_len) == 0); in xlog_recover_ophdr_to_trans()
2336 trans->r_log_tid = tid; in xlog_recover_ophdr_to_trans()
2337 trans->r_lsn = be64_to_cpu(rhead->h_lsn); in xlog_recover_ophdr_to_trans()
2338 INIT_LIST_HEAD(&trans->r_itemq); in xlog_recover_ophdr_to_trans()
2339 INIT_HLIST_NODE(&trans->r_list); in xlog_recover_ophdr_to_trans()
2340 hlist_add_head(&trans->r_list, rhp); in xlog_recover_ophdr_to_trans()
2365 if (ohead->oh_clientid != XFS_TRANSACTION && in xlog_recover_process_ophdr()
2366 ohead->oh_clientid != XFS_LOG) { in xlog_recover_process_ophdr()
2367 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", in xlog_recover_process_ophdr()
2368 __func__, ohead->oh_clientid); in xlog_recover_process_ophdr()
2370 return -EFSCORRUPTED; in xlog_recover_process_ophdr()
2376 len = be32_to_cpu(ohead->oh_len); in xlog_recover_process_ophdr()
2378 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); in xlog_recover_process_ophdr()
2380 return -EFSCORRUPTED; in xlog_recover_process_ophdr()
2394 * - Buffer write submission updates the metadata LSN of the buffer. in xlog_recover_process_ophdr()
2395 * - Log recovery skips items with a metadata LSN >= the current LSN of in xlog_recover_process_ophdr()
2397 * - Separate recovery items against the same metadata buffer can share in xlog_recover_process_ophdr()
2407 * We don't know up front whether buffers are updated multiple times per in xlog_recover_process_ophdr()
2412 if (log->l_recovery_lsn != trans->r_lsn && in xlog_recover_process_ophdr()
2413 ohead->oh_flags & XLOG_COMMIT_TRANS) { in xlog_recover_process_ophdr()
2417 log->l_recovery_lsn = trans->r_lsn; in xlog_recover_process_ophdr()
2421 ohead->oh_flags, pass, buffer_list); in xlog_recover_process_ophdr()
2447 end = dp + be32_to_cpu(rhead->h_len); in xlog_recover_process_data()
2448 num_logops = be32_to_cpu(rhead->h_num_logops); in xlog_recover_process_data()
2450 /* check the log format matches our own - else we can't recover */ in xlog_recover_process_data()
2451 if (xlog_header_check_recover(log->l_mp, rhead)) in xlog_recover_process_data()
2452 return -EIO; in xlog_recover_process_data()
2467 dp += be32_to_cpu(ohead->oh_len); in xlog_recover_process_data()
2468 num_logops--; in xlog_recover_process_data()
2493 resv.tr_logres = dfc->dfc_logres; in xlog_finish_defer_ops()
2497 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres, in xlog_finish_defer_ops()
2498 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp); in xlog_finish_defer_ops()
2500 xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR); in xlog_finish_defer_ops()
2508 list_del_init(&dfc->dfc_list); in xlog_finish_defer_ops()
2530 list_del_init(&dfc->dfc_list); in xlog_abort_defer_ops()
2548 * have started recovery on all the pending intents when we find a non-intent in xlog_recover_process_intents()
2561 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); in xlog_recover_process_intents()
2564 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_process_intents()
2565 ASSERT(xlog_item_is_intent(dfp->dfp_intent)); in xlog_recover_process_intents()
2572 ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0); in xlog_recover_process_intents()
2581 * access dfp->dfp_intent after it returns. It must dispose of in xlog_recover_process_intents()
2584 error = xfs_defer_finish_recovery(log->l_mp, dfp, in xlog_recover_process_intents()
2592 error = xlog_finish_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2598 xlog_abort_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2604 * pending log intent items that we haven't started recovery on so they don't
2613 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) { in xlog_recover_cancel_intents()
2614 ASSERT(xlog_item_is_intent(dfp->dfp_intent)); in xlog_recover_cancel_intents()
2616 xfs_defer_cancel_recovery(log->l_mp, dfp); in xlog_recover_cancel_intents()
2632 list_move(&dfp->dfp_list, &tp->t_dfops); in xlog_recover_finish_intent()
2634 if (error == -EAGAIN) in xlog_recover_finish_intent()
2648 struct xfs_mount *mp = pag->pag_mount; in xlog_recover_clear_agi_bucket()
2655 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp); in xlog_recover_clear_agi_bucket()
2663 agi = agibp->b_addr; in xlog_recover_clear_agi_bucket()
2664 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); in xlog_recover_clear_agi_bucket()
2668 (offset + sizeof(xfs_agino_t) - 1)); in xlog_recover_clear_agi_bucket()
2679 pag->pag_agno); in xlog_recover_clear_agi_bucket()
2689 struct xfs_mount *mp = pag->pag_mount; in xlog_recover_iunlink_bucket()
2695 agino = be32_to_cpu(agi->agi_unlinked[bucket]); in xlog_recover_iunlink_bucket()
2698 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino), in xlog_recover_iunlink_bucket()
2703 ASSERT(VFS_I(ip)->i_nlink == 0); in xlog_recover_iunlink_bucket()
2704 ASSERT(VFS_I(ip)->i_mode != 0); in xlog_recover_iunlink_bucket()
2706 agino = ip->i_next_unlinked; in xlog_recover_iunlink_bucket()
2709 ip->i_prev_unlinked = prev_agino; in xlog_recover_iunlink_bucket()
2714 * before we continue so that it won't race with in xlog_recover_iunlink_bucket()
2715 * building the in-memory list here. This could be in xlog_recover_iunlink_bucket()
2733 ip->i_prev_unlinked = prev_agino; in xlog_recover_iunlink_bucket()
2759 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2778 * AGI is b0rked. Don't process it. in xlog_recover_iunlink_ag()
2789 * racing with anyone else here for the AGI buffer, we don't even need in xlog_recover_iunlink_ag()
2794 agi = agibp->b_addr; in xlog_recover_iunlink_ag()
2820 for_each_perag(log->l_mp, agno, pag) in xlog_recover_process_iunlinks()
2832 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && in xlog_unpack_data()
2834 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; in xlog_unpack_data()
2838 if (xfs_has_logv2(log->l_mp)) { in xlog_unpack_data()
2840 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { in xlog_unpack_data()
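
What xlog_unpack_data restores can be shown with a small simulation: when a record is written, the first 32-bit word of every 512-byte block in its body is overwritten with the cycle number (so torn writes are detectable), and the displaced words are kept in the record header. The kernel stores them big-endian in h_cycle_data[] and consults extended headers for large v2 records; this sketch uses host byte order and a plain array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BBSIZE 512

int main(void)
{
	uint32_t h_cycle_data[2] = { 0xfeedface, 0xcafed00d }; /* saved words */
	uint8_t body[2 * BBSIZE] = { 0 };
	uint32_t cycle = 42, word;

	/* Simulate the packed on-disk state: cycle number leads each block. */
	for (int i = 0; i < 2; i++)
		memcpy(&body[i * BBSIZE], &cycle, sizeof(cycle));

	/* Unpack: put the original first word of each block back. */
	for (int i = 0; i < 2; i++)
		memcpy(&body[i * BBSIZE], &h_cycle_data[i], sizeof(uint32_t));

	memcpy(&word, &body[BBSIZE], sizeof(word));
	printf("block 1 first word restored: 0x%x\n", word); /* 0xcafed00d */
	return 0;
}
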
2861 __le32 old_crc = rhead->h_crc; in xlog_recover_process()
2864 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); in xlog_recover_process()
2868 * if this is a record with a non-zero crc. Unfortunately, mkfs always in xlog_recover_process()
2875 return -EFSBADCRC; in xlog_recover_process()
2881 * CRC in the header is non-zero. This is an advisory warning and the in xlog_recover_process()
2886 if (old_crc || xfs_has_crc(log->l_mp)) { in xlog_recover_process()
2887 xfs_alert(log->l_mp, in xlog_recover_process()
2898 if (xfs_has_crc(log->l_mp)) { in xlog_recover_process()
2899 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); in xlog_recover_process()
2900 return -EFSCORRUPTED; in xlog_recover_process()
2919 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2920 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) in xlog_valid_rec_header()
2921 return -EFSCORRUPTED; in xlog_valid_rec_header()
2922 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2923 (!rhead->h_version || in xlog_valid_rec_header()
2924 (be32_to_cpu(rhead->h_version) & in xlog_valid_rec_header()
2926 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", in xlog_valid_rec_header()
2927 __func__, be32_to_cpu(rhead->h_version)); in xlog_valid_rec_header()
2928 return -EFSCORRUPTED; in xlog_valid_rec_header()
2932 * LR body must have data (or it wouldn't have been written) in xlog_valid_rec_header()
2935 hlen = be32_to_cpu(rhead->h_len); in xlog_valid_rec_header()
2936 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize)) in xlog_valid_rec_header()
2937 return -EFSCORRUPTED; in xlog_valid_rec_header()
2939 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2940 blkno > log->l_logBBsize || blkno > INT_MAX)) in xlog_valid_rec_header()
2941 return -EFSCORRUPTED; in xlog_valid_rec_header()
2946 * Read the log from tail to head and process the log records found.
2947 * Handle the two cases where the tail and head are in the same cycle
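
The tail-to-head walk with wraparound can be sketched as a modular loop. Hypothetical numbers; the real pass reads record headers and bodies rather than counting blocks:

#include <stdio.h>

int main(void)
{
	int log_bbsize = 16;
	int tail_blk = 12, head_blk = 4;	/* head wrapped past block 0 */
	int visited = 0;

	for (int blk = tail_blk; blk != head_blk;
	     blk = (blk + 1) % log_bbsize)
		visited++;

	printf("replayed %d blocks\n", visited);	/* 8: 12..15, then 0..3 */
	return 0;
}
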
2984 if (xfs_has_logv2(log->l_mp)) { in xlog_do_recovery_pass()
2992 return -ENOMEM; in xlog_do_recovery_pass()
3011 h_size = be32_to_cpu(rhead->h_size); in xlog_do_recovery_pass()
3012 h_len = be32_to_cpu(rhead->h_len); in xlog_do_recovery_pass()
3013 if (h_len > h_size && h_len <= log->l_mp->m_logbsize && in xlog_do_recovery_pass()
3014 rhead->h_num_logops == cpu_to_be32(1)) { in xlog_do_recovery_pass()
3015 xfs_warn(log->l_mp, in xlog_do_recovery_pass()
3017 h_size, log->l_mp->m_logbsize); in xlog_do_recovery_pass()
3018 h_size = log->l_mp->m_logbsize; in xlog_do_recovery_pass()
3031 ASSERT(log->l_sectBBsize == 1); in xlog_do_recovery_pass()
3038 return -ENOMEM; in xlog_do_recovery_pass()
3042 return -ENOMEM; in xlog_do_recovery_pass()
3049 * When the head is not on the same cycle number as the tail, in xlog_do_recovery_pass()
3050 * we can't do a sequential recovery. in xlog_do_recovery_pass()
3052 while (blk_no < log->l_logBBsize) { in xlog_do_recovery_pass()
3054 * Check for header wrapping around physical end-of-log in xlog_do_recovery_pass()
3059 if (blk_no + hblks <= log->l_logBBsize) { in xlog_do_recovery_pass()
3067 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3070 split_hblks = log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3081 * large sector sizes (non-512) only because: in xlog_do_recovery_pass()
3082 * - we increased the buffer size originally in xlog_do_recovery_pass()
3085 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3087 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3089 * - order is important. in xlog_do_recovery_pass()
3091 wrapped_hblks = hblks - split_hblks; in xlog_do_recovery_pass()
3104 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); in xlog_do_recovery_pass()
3114 if (blk_no + bblks <= log->l_logBBsize || in xlog_do_recovery_pass()
3115 blk_no >= log->l_logBBsize) { in xlog_do_recovery_pass()
3126 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3132 log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3143 * large sector sizes (non-512) only because: in xlog_do_recovery_pass()
3144 * - we increased the buffer size originally in xlog_do_recovery_pass()
3147 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3149 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3151 * - order is important. in xlog_do_recovery_pass()
3154 bblks - split_bblks, in xlog_do_recovery_pass()
3169 ASSERT(blk_no >= log->l_logBBsize); in xlog_do_recovery_pass()
3170 blk_no -= log->l_logBBsize; in xlog_do_recovery_pass()
3186 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); in xlog_do_recovery_pass()
3290 struct xfs_mount *mp = log->l_mp; in xlog_do_recover()
3291 struct xfs_buf *bp = mp->m_sb_bp; in xlog_do_recover()
3292 struct xfs_sb *sbp = &mp->m_sb; in xlog_do_recover()
3305 return -EIO; in xlog_do_recover()
3320 * re-read the superblock and reverify it. in xlog_do_recover()
3334 /* Convert superblock from on-disk format */ in xlog_do_recover()
3335 xfs_sb_from_disk(sbp, bp->b_addr); in xlog_do_recover()
3338 /* re-initialise in-core superblock and geometry structures */ in xlog_do_recover()
3339 mp->m_features |= xfs_sb_version_to_features(sbp); in xlog_do_recover()
3341 error = xfs_initialize_perag(mp, sbp->sb_agcount, sbp->sb_dblocks, in xlog_do_recover()
3342 &mp->m_maxagi); in xlog_do_recover()
3344 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error); in xlog_do_recover()
3347 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); in xlog_do_recover()
3350 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xlog_do_recover()
3355 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3376 if (xfs_has_crc(log->l_mp) && in xlog_recover()
3377 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn)) in xlog_recover()
3378 return -EINVAL; in xlog_recover()
3383 * disallow recovery on read-only mounts. note -- mount in xlog_recover()
3390 * the device itself is read-only, in which case we fail. in xlog_recover()
3392 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { in xlog_recover()
3403 if (xfs_sb_is_v5(&log->l_mp->m_sb) && in xlog_recover()
3404 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, in xlog_recover()
3406 xfs_warn(log->l_mp, in xlog_recover()
3408 (log->l_mp->m_sb.sb_features_log_incompat & in xlog_recover()
3410 xfs_warn(log->l_mp, in xlog_recover()
3412 xfs_warn(log->l_mp, in xlog_recover()
3414 return -EINVAL; in xlog_recover()
3423 xfs_notice(log->l_mp, in xlog_recover()
3429 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", in xlog_recover()
3430 log->l_mp->m_logname ? log->l_mp->m_logname in xlog_recover()
3434 set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); in xlog_recover()
3443 * part of recovery so that the root and real-time bitmap inodes can be read in
3445 * space in the real-time portion of the file system.
3456 * Cancel all the unprocessed intent items now so that we don't in xlog_recover_finish()
3463 xfs_alert(log->l_mp, "Failed to recover intents"); in xlog_recover_finish()
3469 * Sync the log to get all the intents out of the AIL. This isn't in xlog_recover_finish()
3473 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_recover_finish()
3481 if (xfs_clear_incompat_log_features(log->l_mp)) { in xlog_recover_finish()
3482 error = xfs_sync_sb(log->l_mp, false); in xlog_recover_finish()
3484 xfs_alert(log->l_mp, in xlog_recover_finish()
3497 * read-only mount. in xlog_recover_finish()
3499 error = xfs_reflink_recover_cow(log->l_mp); in xlog_recover_finish()
3501 xfs_alert(log->l_mp, in xlog_recover_finish()