Lines matching "scrubber" and "done"
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 */
/* in xchk_setup_ag_iallocbt() */
        return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
/* Inode btree scrubber. */
/*
 * - The finobt need not have a record if all inodes in the inobt record are
 *   allocated.
 * - The finobt need not have a record if all inodes in the inobt record are
 *   free.
 * - The finobt need not have a record if the inobt record says this is a
 *   hole.
 * See the illustrative sketch below for the bit layout these checks use.
 */
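/*
 * Illustrative sketch, not part of the original file: the bit math used to
 * derive one inode's free and hole status from an incore inode btree record.
 * "example_irec" and "example_irec_bits" are hypothetical stand-ins for the
 * relevant fields of struct xfs_inobt_rec_incore.  ir_free carries one bit
 * per inode in the 64-inode chunk, and each ir_holemask bit covers four
 * inodes (XFS_INODES_PER_HOLEMASK_BIT).
 */
#include <stdbool.h>
#include <stdint.h>

struct example_irec {
        uint32_t        ir_startino;    /* first inode in the chunk */
        uint16_t        ir_holemask;    /* one bit per 4-inode region */
        uint64_t        ir_free;        /* one bit per inode, set = free */
};

static void
example_irec_bits(const struct example_irec *irec, uint32_t agino,
                  bool *is_free, bool *is_hole)
{
        unsigned int    rec_idx = agino - irec->ir_startino;
        unsigned int    hole_idx = rec_idx / 4;

        *is_free = irec->ir_free & (1ULL << rec_idx);
        *is_hole = irec->ir_holemask & (1U << hole_idx);
}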
/* in xchk_inobt_xref_finobt() */
        struct xfs_btree_cur    *cur = sc->sa.fino_cur;
        ...
        ASSERT(cur->bc_btnum == XFS_BTNUM_FINO);
        ...
                return -EFSCORRUPTED;
        ...
        frec_idx = agino - frec.ir_startino;
        ...
        /* all inodes allocated; no finobt record required */
        if (irec->ir_free == 0)
        ...
        /* all inodes free; no finobt record required */
        if (irec->ir_free == XFS_INOBT_ALL_FREE)
/* in xchk_inobt_chunk_xref_finobt() */
        ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT);

        if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
        ...
        for (i = agino, rec_idx = agino - irec->ir_startino;
        ...
                free = irec->ir_free & (1ULL << rec_idx);
                ...
                hole = irec->ir_holemask & (1U << hole_idx);
                ...
                if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
/* in xchk_finobt_xref_inobt() */
        struct xfs_btree_cur    *cur = sc->sa.ino_cur;
        ...
        ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
        ...
                return -EFSCORRUPTED;
        ...
        rec_idx = agino - irec.ir_startino;
/* in xchk_finobt_chunk_xref_inobt() */
        ASSERT(sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT);

        if (!sc->sa.ino_cur || xchk_skip_xref(sc->sm))
        ...
        for (i = agino, rec_idx = agino - frec->ir_startino;
        ...
                ffree = frec->ir_free & (1ULL << rec_idx);
                ...
                fhole = frec->ir_holemask & (1U << hole_idx);
                ...
                if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
/* Is this chunk worth checking and cross-referencing? */

/* in xchk_iallocbt_chunk() */
        struct xfs_scrub        *sc = bs->sc;
        struct xfs_mount        *mp = bs->cur->bc_mp;
        struct xfs_perag        *pag = bs->cur->bc_ag.pag;
        ...
        len = XFS_B_TO_FSB(mp, nr_inodes * mp->m_sb.sb_inodesize);
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
        ...
        if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
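/*
 * Illustrative sketch, not part of the original file: the byte-to-block
 * arithmetic behind the XFS_B_TO_FSB() conversion above.  With 4096-byte
 * blocks (blocklog == 12) and 512-byte inodes, a 64-inode chunk occupies
 * 64 * 512 / 4096 = 8 filesystem blocks.  "example_chunk_len_fsb" is a
 * hypothetical name.
 */
#include <stdint.h>

static uint64_t
example_chunk_len_fsb(uint64_t nr_inodes, uint64_t inodesize,
                      unsigned int blocklog)
{
        uint64_t        bytes = nr_inodes * inodesize;

        /* round up to a whole block, as XFS_B_TO_FSB() does */
        return (bytes + (1ULL << blocklog) - 1) >> blocklog;
}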
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 * ...
 * @dip is the on-disk inode.
 */
/* in xchk_iallocbt_check_cluster_ifree() */
        struct xfs_mount        *mp = bs->cur->bc_mp;
        ...
        if (xchk_should_terminate(bs->sc, &error))
        ...
        agino = irec->ir_startino + irec_ino;
        fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
        irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

        if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
            (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        error = xchk_inode_is_allocated(bs->sc, agino, &ino_inuse);
        if (error == -ENODATA) {
                /* not in cache; fall back to the on-disk di_mode */
                freemask_ok = irec_free ^ !!(dip->di_mode);
                if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
                        return -EDEADLOCK;
        ...
                 * The inode scrubber can deal with this.
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
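/*
 * Illustrative sketch, not part of the original file: the consistency rule
 * behind the freemask check above.  Exactly one of "the inobt says this
 * inode is free" and "the on-disk inode is in use (nonzero di_mode)" may be
 * true; if both or neither hold, the record and the inode disagree and the
 * btree is marked corrupt.  "example_freemask_ok" is a hypothetical name.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_freemask_ok(bool irec_free, uint16_t di_mode)
{
        /* logical xor: true iff the two views agree */
        return irec_free ^ (di_mode != 0);
}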
/* in xchk_iallocbt_check_cluster() */
        struct xfs_mount        *mp = bs->cur->bc_mp;
        ...
        xfs_agnumber_t          agno = bs->cur->bc_ag.pag->pag_agno;
        ...
        nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                        M_IGEO(mp)->inodes_per_cluster);
        ...
        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);
        ...
        ir_holemask = (irec->ir_holemask & cluster_mask);
        ...
        imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
        imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
                        mp->m_sb.sb_inodelog;
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
                        ...
                        XFS_INO_TO_OFFSET(mp, irec->ir_startino +
                                        cluster_base));
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
                /* hole: the rmap must not show inodes owning this extent */
                xchk_xref_is_not_owned_by(bs->sc, agbno,
                                M_IGEO(mp)->blocks_per_cluster,
                                &XFS_RMAP_OINFO_INODES);
        ...
        /* real cluster: the rmap must show only inodes as the owner */
        xchk_xref_is_only_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
                        &XFS_RMAP_OINFO_INODES);
        ...
        error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
        if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
        ...
        if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
                imap.im_boffset += mp->m_sb.sb_inodesize;
        ...
        xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
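/*
 * Illustrative sketch, not part of the original file: stepping through the
 * inodes of one cluster buffer the way the loop above advances im_boffset.
 * The parameter names are hypothetical stand-ins for geometry the real code
 * reads from the mount; the bounds test mirrors the BBTOB(b_length) check.
 */
#include <stdbool.h>

static bool
example_walk_cluster(unsigned int nr_inodes, unsigned int inodesize,
                     unsigned int buflen_bytes, unsigned int boffset)
{
        unsigned int    i;

        for (i = 0; i < nr_inodes; i++, boffset += inodesize) {
                if (boffset >= buflen_bytes)
                        return false;   /* inode would overrun the buffer */
                /* ...check the on-disk inode at byte offset boffset... */
        }
        return true;
}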
/* in xchk_iallocbt_check_clusters() */
        for (cluster_base = 0;
             cluster_base < XFS_INODES_PER_CHUNK;
             cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
/* in xchk_iallocbt_rec_alignment() */
        struct xfs_mount        *mp = bs->sc->mp;
        struct xchk_iallocbt    *iabt = bs->private;
        ...
        /* finobt records: check only the basic startino alignment */
        if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
                ...
                imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                                igeo->cluster_align_inodes) - 1;
                if (irec->ir_startino & imask)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        if (iabt->next_startino != NULLAGINO) {
                /* mid-cluster: this record must continue the sequence */
                if (irec->ir_startino != iabt->next_startino) {
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                ...
                iabt->next_startino += XFS_INODES_PER_CHUNK;

                /* Are we done with the cluster? */
                if (iabt->next_startino >= iabt->next_cluster_ino) {
                        iabt->next_startino = NULLAGINO;
                        iabt->next_cluster_ino = NULLAGINO;
                }
        ...
        /* inobt records must be aligned to cluster and inode alignment size */
        if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        /* clusters no larger than one chunk need no multi-record tracking */
        if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
        ...
        iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
        iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
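/*
 * Illustrative sketch, not part of the original file: the bookkeeping above
 * for inode clusters that span several inobt records.  If a cluster holds
 * 128 inodes and every record covers 64 (XFS_INODES_PER_CHUNK), a record
 * starting at S must be followed by one at S + 64 before the cluster, which
 * ends at S + 128, is complete.  All names below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_NULLAGINO       UINT32_MAX      /* stand-in for NULLAGINO */

struct example_tracker {
        uint32_t        next_startino;          /* next expected record */
        uint32_t        next_cluster_ino;       /* first inode past the cluster */
};

static bool
example_track_record(struct example_tracker *t, uint32_t startino,
                     uint32_t inodes_per_chunk)
{
        if (startino != t->next_startino)
                return false;                   /* record out of sequence */

        t->next_startino += inodes_per_chunk;
        if (t->next_startino >= t->next_cluster_ino) {
                /* done with this cluster; wait for the next one */
                t->next_startino = EXAMPLE_NULLAGINO;
                t->next_cluster_ino = EXAMPLE_NULLAGINO;
        }
        return true;
}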
/* in xchk_iallocbt_rec() */
        struct xfs_mount        *mp = bs->cur->bc_mp;
        struct xchk_iallocbt    *iabt = bs->private;
        ...
        if (xfs_inobt_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
        ...
        iabt->inodes += irec.ir_count;

        /* Handle non-sparse inodes */
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
        ...
        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
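/*
 * Illustrative sketch, not part of the original file: the sparse-chunk
 * accounting the record checks above enforce.  Each set holemask bit removes
 * four inodes (XFS_INODES_PER_HOLEMASK_BIT) from the 64-inode chunk, so
 * holes plus recorded inodes must add back up to 64.
 * "example_sparse_counts_ok" is a hypothetical name.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_sparse_counts_ok(uint16_t holemask, unsigned int ir_count)
{
        unsigned int    holecount = 0;
        unsigned int    i;

        for (i = 0; i < 16; i++)                /* XFS_INOBT_HOLEMASK_BITS */
                if (holemask & (1U << i))
                        holecount += 4;         /* XFS_INODES_PER_HOLEMASK_BIT */

        return holecount <= 64 && holecount + ir_count == 64;
}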
/* in xchk_iallocbt_xref_rmap_btreeblks() */
        if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
            (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
            xchk_skip_xref(sc->sm))
        ...
        error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
        ...
        if (sc->sa.fino_cur) {
                error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
        ...
        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INOBT, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
        ...
        if (blocks != inobt_blocks + finobt_blocks)
                xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
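/*
 * Illustrative sketch, not part of the original file: the invariant this
 * cross-reference enforces.  Walking the inobt and finobt counts their
 * blocks directly; the rmapbt's total for the XFS_RMAP_OINFO_INOBT owner
 * must match exactly, or one of the trees is wrong.  The function name is
 * hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_btreeblks_ok(uint64_t rmap_owned_blocks, uint64_t inobt_blocks,
                     uint64_t finobt_blocks)
{
        return rmap_owned_blocks == inobt_blocks + finobt_blocks;
}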
/* in xchk_iallocbt_xref_rmap_inodes() */
        if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
        ...
        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INODES, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
        ...
        inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
        if (blocks != inode_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
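/*
 * Illustrative arithmetic, not part of the original file: with 512-byte
 * inodes on a 4096-byte-block filesystem, 1,024 inodes counted while walking
 * the btree translate to 1,024 * 512 = 524,288 bytes = 128 blocks, which
 * must equal the rmapbt's block count for the XFS_RMAP_OINFO_INODES owner.
 */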
/* in xchk_iallocbt() */
        switch (sc->sm->sm_type) {
        case XFS_SCRUB_TYPE_INOBT:
                cur = sc->sa.ino_cur;
                break;
        case XFS_SCRUB_TYPE_FINOBT:
                cur = sc->sa.fino_cur;
                break;
        default:
                ASSERT(0);
                return -EIO;
        }
/* in xchk_xref_inode_check() */
        if (!(*icur) || xchk_skip_xref(sc->sm))
/* in xchk_xref_is_not_inode_chunk() */
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
                        XBTREE_RECPACKING_EMPTY);
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur,
                        XBTREE_RECPACKING_EMPTY);
/* in xchk_xref_is_inode_chunk() */
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur,
                        XBTREE_RECPACKING_FULL);