// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_btree.h"
14 #include "xfs_log_format.h"
15 #include "xfs_trans.h"
16 #include "xfs_inode.h"
17 #include "xfs_alloc.h"
19 #include "xfs_bmap_btree.h"
21 #include "xfs_rmap_btree.h"
22 #include "scrub/scrub.h"
23 #include "scrub/common.h"
24 #include "scrub/btree.h"
26 /* Set us up with an inode's bmap. */
/*
 * Prepare to scrub one fork of an inode's block mappings: grab the inode,
 * take IOLOCK+MMAPLOCK to freeze I/O and page faults, flush dirty pagecache
 * for data-fork scrubs, allocate an empty scrub transaction, and finish by
 * taking the ILOCK.  Teardown (see comment below) drops the locks.
 *
 * NOTE(review): this chunk is a lossy extraction -- the upstream source's
 * line numbers are fused into the text and several lines are missing (the
 * return type, braces, and the "if (error) return error;" checks after
 * xchk_get_inode()/filemap_fdatawrite()/xchk_trans_alloc()).  Compare with
 * upstream before relying on control flow; only comments are added here.
 */
28 xchk_setup_inode_bmap(
33 error = xchk_get_inode(sc);
/* Exclude concurrent writers and page faults while we look at mappings. */
37 sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
38 xfs_ilock(sc->ip, sc->ilock_flags);
41 * We don't want any ephemeral data fork updates sitting around
42 * while we inspect block mappings, so wait for directio to finish
43 * and flush dirty data if we have delalloc reservations.
/* Only regular-file data forks need the writeback flush below. */
45 if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
46 sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
47 struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
49 inode_dio_wait(VFS_I(sc->ip));
52 * Try to flush all incore state to disk before we examine the
53 * space mappings for the data fork. Leave accumulated errors
54 * in the mapping for the writer threads to consume.
56 * On ENOSPC or EIO writeback errors, we continue into the
57 * extent mapping checks because write failures do not
58 * necessarily imply anything about the correctness of the file
59 * metadata. The metadata and the file data could be on
60 * completely separate devices; a media failure might only
61 * affect a subset of the disk, etc. We can handle delalloc
62 * extents in the scrubber, so leaving them in memory is fine.
64 error = filemap_fdatawrite(mapping);
66 error = filemap_fdatawait_keep_errors(mapping);
/* ENOSPC/EIO are tolerated (see comment above); anything else is fatal. */
67 if (error && (error != -ENOSPC && error != -EIO))
71 /* Got the inode, lock it and we're ready to go. */
72 error = xchk_trans_alloc(sc, 0);
/* ILOCK last, after the transaction, per XFS locking order. */
75 sc->ilock_flags |= XFS_ILOCK_EXCL;
76 xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
79 /* scrub teardown will unlock and release the inode */
84 * Inode fork block mapping (BMBT) scrubber.
85 * More complex than the others because we have to scrub
86 * all the extents regardless of whether or not the fork
/*
 * Per-scrub state shared by the bmap checkers below.
 *
 * NOTE(review): the struct body is truncated by the extraction; later code
 * also reads info->sc, info->whichfork, info->is_rt, info->is_shared and
 * info->was_loaded, so those members exist upstream but are missing here.
 */
90 struct xchk_bmap_info {
/* Highest file offset seen so far; used to catch out-of-order extents. */
92 xfs_fileoff_t lastoff;
99 /* Look for a corresponding rmap for this irec. */
/*
 * Find the reverse-mapping record covering this file mapping and return it
 * through @rmap.  Presumably returns a "found" boolean upstream -- callers
 * below use it as "if (!xchk_bmap_get_rmap(...))".  TODO(review): return
 * type, @agbno/@owner parameters, and several early-return lines are
 * missing from this extraction.
 */
102 struct xchk_bmap_info *info,
103 struct xfs_bmbt_irec *irec,
106 struct xfs_rmap_irec *rmap)
108 xfs_fileoff_t offset;
109 unsigned int rflags = 0;
/* Mirror the irec's fork and written state into the rmap lookup flags. */
113 if (info->whichfork == XFS_ATTR_FORK)
114 rflags |= XFS_RMAP_ATTR_FORK;
115 if (irec->br_state == XFS_EXT_UNWRITTEN)
116 rflags |= XFS_RMAP_UNWRITTEN;
119 * CoW staging extents are owned (on disk) by the refcountbt, so
120 * their rmaps do not have offsets.
122 if (info->whichfork == XFS_COW_FORK)
125 offset = irec->br_startoff;
128 * If the caller thinks this could be a shared bmbt extent (IOWs,
129 * any data fork extent of a reflink inode) then we have to use the
130 * range rmap lookup to make sure we get the correct owner/offset.
132 if (info->is_shared) {
133 error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
134 owner, offset, rflags, rmap, &has_rmap);
135 if (!xchk_should_check_xref(info->sc, &error,
136 &info->sc->sa.rmap_cur))
142 * Otherwise, use the (faster) regular lookup.
144 error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
145 offset, rflags, &has_rmap);
146 if (!xchk_should_check_xref(info->sc, &error,
147 &info->sc->sa.rmap_cur))
/* A successful LE lookup still has to materialize the actual record. */
152 error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
153 if (!xchk_should_check_xref(info->sc, &error,
154 &info->sc->sa.rmap_cur))
/* No rmap found for a real extent => cross-referencing corruption. */
159 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
164 /* Make sure that we have rmapbt records for this extent. */
/*
 * Cross-reference one bmap extent against the AG's rmap btree: the rmap
 * must fully cover the extent's physical range, agree on logical offset
 * (except for CoW staging), owner, fork flag and unwritten state.
 * Each mismatch marks the fork as cross-reference-corrupt.
 *
 * NOTE(review): parameters (@irec, @agbno and presumably the function's
 * local "owner") and several closing lines are missing from this extraction.
 */
167 struct xchk_bmap_info *info,
168 struct xfs_bmbt_irec *irec,
171 struct xfs_rmap_irec rmap;
172 unsigned long long rmap_end;
/* Nothing to do without an rmap cursor, or if xref is being skipped. */
175 if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
/* CoW fork blocks are rmapped to OWN_COW, not to the inode number. */
178 if (info->whichfork == XFS_COW_FORK)
179 owner = XFS_RMAP_OWN_COW;
181 owner = info->sc->ip->i_ino;
183 /* Find the rmap record for this irec. */
184 if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
187 /* Check the rmap. */
/* Widen to 64 bits so startblock + blockcount cannot wrap. */
188 rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
189 if (rmap.rm_startblock > agbno ||
190 agbno + irec->br_blockcount > rmap_end)
191 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
195 * Check the logical offsets if applicable. CoW staging extents
196 * don't track logical offsets since the mappings only exist in
199 if (info->whichfork != XFS_COW_FORK) {
200 rmap_end = (unsigned long long)rmap.rm_offset +
202 if (rmap.rm_offset > irec->br_startoff ||
203 irec->br_startoff + irec->br_blockcount > rmap_end)
204 xchk_fblock_xref_set_corrupt(info->sc,
205 info->whichfork, irec->br_startoff)
208 if (rmap.rm_owner != owner)
209 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
213 * Check for discrepancies between the unwritten flag in the irec and
214 * the rmap. Note that the (in-memory) CoW fork distinguishes between
215 * unwritten and written extents, but we don't track that in the rmap
216 * records because the blocks are owned (on-disk) by the refcountbt,
217 * which doesn't track unwritten state.
/* "!!" normalizes both sides to 0/1 before comparing the flags. */
219 if (owner != XFS_RMAP_OWN_COW &&
220 !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
221 !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN)
222 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
225 if (!!(info->whichfork == XFS_ATTR_FORK) !=
226 !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
227 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
/* A file-data extent must never be rmapped as a bmbt block. */
229 if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
230 xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
234 /* Cross-reference a single rtdev extent record. */
/*
 * Realtime extents only get the "is this rt space in use?" check; there is
 * no realtime rmap btree to cross-reference against (see the "Don't support
 * realtime rmap checks yet" guard in xchk_bmap_check_rmaps below).
 */
236 xchk_bmap_rt_iextent_xref(
237 struct xfs_inode *ip,
238 struct xchk_bmap_info *info,
239 struct xfs_bmbt_irec *irec)
241 xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
242 irec->br_blockcount);
245 /* Cross-reference a single datadev extent record. */
/*
 * Translate the extent's startblock into (AG, agbno), attach the AG btree
 * cursors, then cross-check the extent against the free-space, inode-chunk,
 * rmap and refcount information, with fork-specific reflink/CoW checks.
 * The AG context is released before returning.
 *
 * NOTE(review): local declarations (agno/agbno/len/error) and the switch's
 * case labels/breaks are missing from this extraction; the reflink check on
 * the data fork presumably calls xchk_xref_is_not_shared() only for
 * non-reflink inodes -- confirm against upstream.
 */
247 xchk_bmap_iextent_xref(
248 struct xfs_inode *ip,
249 struct xchk_bmap_info *info,
250 struct xfs_bmbt_irec *irec)
252 struct xfs_mount *mp = info->sc->mp;
258 agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
259 agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
260 len = irec->br_blockcount;
/* Grab the AG headers and btree cursors for this extent's AG. */
262 error = xchk_ag_init(info->sc, agno, &info->sc->sa);
263 if (!xchk_fblock_process_error(info->sc, info->whichfork,
264 irec->br_startoff, &error))
267 xchk_xref_is_used_space(info->sc, agbno, len);
268 xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
269 xchk_bmap_xref_rmap(info, irec, agbno);
270 switch (info->whichfork) {
272 if (xfs_is_reflink_inode(info->sc->ip))
276 xchk_xref_is_not_shared(info->sc, agbno,
277 irec->br_blockcount);
/* CoW fork extents must be marked as staging in the refcount btree. */
280 xchk_xref_is_cow_staging(info->sc, agbno,
281 irec->br_blockcount);
285 xchk_ag_free(info->sc, &info->sc->sa);
289 * Directories and attr forks should never have blocks that can't be addressed
/*
 * Dir and attr fork offsets are addressed by 32-bit da btree block numbers,
 * so both endpoints of the extent must pass xfs_verify_dablk(); anything
 * outside that range marks the fork corrupt.  Other forks are skipped.
 */
293 xchk_bmap_dirattr_extent(
294 struct xfs_inode *ip,
295 struct xchk_bmap_info *info,
296 struct xfs_bmbt_irec *irec)
298 struct xfs_mount *mp = ip->i_mount;
/* Only directories and attr forks are subject to the dablk limit. */
301 if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
304 if (!xfs_verify_dablk(mp, irec->br_startoff))
305 xchk_fblock_set_corrupt(info->sc, info->whichfork,
/* Also check the last block of the extent, not just the first. */
308 off = irec->br_startoff + irec->br_blockcount - 1;
309 if (!xfs_verify_dablk(mp, off))
310 xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
313 /* Scrub a single extent record. */
/*
 * Validate one in-core extent record: ordering against the previous record,
 * file-offset range, dir/attr addressing limits, no holes or delalloc in a
 * real extent list, physical range validity (rt or data device), length cap,
 * and no unwritten extents in attr forks.  If nothing was flagged corrupt,
 * cross-reference against the AG (or rt) metadata, then advance lastoff.
 *
 * NOTE(review): the function name line, local declarations and several
 * lines (e.g. the "if (info->is_rt)" selector implied by lines 355/359,
 * the return) are missing from this extraction.
 */
316 struct xfs_inode *ip,
317 struct xchk_bmap_info *info,
318 struct xfs_bmbt_irec *irec)
320 struct xfs_mount *mp = info->sc->mp;
324 * Check for out-of-order extents. This record could have come
325 * from the incore list, for which there is no ordering check.
327 if (irec->br_startoff < info->lastoff)
328 xchk_fblock_set_corrupt(info->sc, info->whichfork,
/* Logical offset+length must fit in the file's addressable range. */
331 if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
332 xchk_fblock_set_corrupt(info->sc, info->whichfork,
335 xchk_bmap_dirattr_extent(ip, info, irec);
337 /* There should never be a "hole" extent in either extent list. */
338 if (irec->br_startblock == HOLESTARTBLOCK)
339 xchk_fblock_set_corrupt(info->sc, info->whichfork,
343 * Check for delalloc extents. We never iterate the ones in the
344 * in-core extent scan, and we should never see these in the bmbt.
346 if (isnullstartblock(irec->br_startblock))
347 xchk_fblock_set_corrupt(info->sc, info->whichfork,
350 /* Make sure the extent points to a valid place. */
/* On-disk bmbt record length field caps extents at MAXEXTLEN blocks. */
351 if (irec->br_blockcount > MAXEXTLEN)
352 xchk_fblock_set_corrupt(info->sc, info->whichfork,
/* Realtime extents validate against the rt device ... */
355 !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
356 xchk_fblock_set_corrupt(info->sc, info->whichfork,
/* ... everything else against the data device's AG space. */
359 !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount))
360 xchk_fblock_set_corrupt(info->sc, info->whichfork,
363 /* We don't allow unwritten extents on attr forks. */
364 if (irec->br_state == XFS_EXT_UNWRITTEN &&
365 info->whichfork == XFS_ATTR_FORK)
366 xchk_fblock_set_corrupt(info->sc, info->whichfork,
/* Skip the (expensive) cross-referencing once corruption is flagged. */
369 if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
373 xchk_bmap_rt_iextent_xref(ip, info, irec);
375 xchk_bmap_iextent_xref(ip, info, irec);
/* Remember where this extent ended for the next ordering check. */
377 info->lastoff = irec->br_startoff + irec->br_blockcount;
381 /* Scrub a bmbt record. */
/*
 * xchk_btree callback invoked for every record in the on-disk bmbt.
 * First, on CRC filesystems, verify the owner field of each btree block on
 * the path to this record (verifiers don't check owners).  Then require
 * that the incore extent tree contains an extent identical to this on-disk
 * record -- unless the incore tree was freshly loaded by the scrubber
 * itself, in which case they match by construction.
 *
 * NOTE(review): function name line, loop braces and the return are missing
 * from this extraction; bc_ptrs[0] == 1 presumably restricts the owner walk
 * to the first record of each leaf -- confirm against upstream.
 */
384 struct xchk_btree *bs,
385 union xfs_btree_rec *rec)
387 struct xfs_bmbt_irec irec;
388 struct xfs_bmbt_irec iext_irec;
389 struct xfs_iext_cursor icur;
390 struct xchk_bmap_info *info = bs->private;
391 struct xfs_inode *ip = bs->cur->bc_ino.ip;
392 struct xfs_buf *bp = NULL;
393 struct xfs_btree_block *block;
394 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
399 * Check the owners of the btree blocks up to the level below
400 * the root since the verifiers don't do that.
402 if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
403 bs->cur->bc_ptrs[0] == 1) {
404 for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
405 block = xfs_btree_get_block(bs->cur, i, &bp);
/* Long-form btree blocks record the owning inode number. */
406 owner = be64_to_cpu(block->bb_u.l.bb_owner);
407 if (owner != ip->i_ino)
408 xchk_fblock_set_corrupt(bs->sc,
414 * Check that the incore extent tree contains an extent that matches
415 * this one exactly. We validate those cached bmaps later, so we don't
416 * need to check them here. If the incore extent tree was just loaded
417 * from disk by the scrubber, we assume that its contents match what's
418 * on disk (we still hold the ILOCK) and skip the equivalence check.
420 if (!info->was_loaded)
/* Decode the on-disk record, then look for its incore twin. */
423 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
424 if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
426 irec.br_startoff != iext_irec.br_startoff ||
427 irec.br_startblock != iext_irec.br_startblock ||
428 irec.br_blockcount != iext_irec.br_blockcount ||
429 irec.br_state != iext_irec.br_state)
430 xchk_fblock_set_corrupt(bs->sc, info->whichfork,
435 /* Scan the btree records. */
/*
 * Scrub the fork's on-disk bmbt: make sure the incore extent cache is
 * loaded (recording whether it already was, for xchk_bmapbt_rec's
 * equivalence check), then walk the whole btree with xchk_btree() using
 * xchk_bmapbt_rec as the per-record callback.  The cursor is torn down
 * with the final error code.
 *
 * NOTE(review): the @whichfork parameter line and the trailing "return
 * error;" are missing from this extraction.
 */
438 struct xfs_scrub *sc,
440 struct xchk_bmap_info *info)
442 struct xfs_owner_info oinfo;
443 struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
444 struct xfs_mount *mp = sc->mp;
445 struct xfs_inode *ip = sc->ip;
446 struct xfs_btree_cur *cur;
449 /* Load the incore bmap cache if it's not loaded. */
450 info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
452 error = xfs_iread_extents(sc->tp, ip, whichfork);
453 if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
456 /* Check the btree structure. */
457 cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
/* Btree blocks are rmapped to this inode's bmbt owner. */
458 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
459 error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
460 xfs_btree_del_cursor(cur, error);
/*
 * Context passed through xfs_rmap_query_all() to xchk_bmap_check_rmap().
 * NOTE(review): the extraction dropped at least the "whichfork" member,
 * which the callback reads via sbcri->whichfork.
 */
465 struct xchk_bmap_check_rmap_info {
466 struct xfs_scrub *sc;
/* Iteration cursor into the inode's incore extent tree. */
468 struct xfs_iext_cursor icur;
471 /* Can we find bmaps that fit this rmap? */
/*
 * xfs_rmap_query_all() callback: for each rmap record owned by the inode
 * under scrub (and matching this fork), verify that the entire rmapped
 * range is covered by incore bmap extents.  Since a single rmap can span
 * many bmap records (rmap lengths are AG-limited, bmap lengths are 2^21),
 * the record is consumed piecewise in a loop, advancing by each matching
 * bmap extent's length.
 *
 * NOTE(review): "have_map"/"error" declarations, the fork-format check
 * around line 495-496, the loop construct around lines 513-530, and the
 * return paths are missing from this extraction.
 */
473 xchk_bmap_check_rmap(
474 struct xfs_btree_cur *cur,
475 struct xfs_rmap_irec *rec,
478 struct xfs_bmbt_irec irec;
479 struct xchk_bmap_check_rmap_info *sbcri = priv;
480 struct xfs_ifork *ifp;
481 struct xfs_scrub *sc = sbcri->sc;
484 /* Is this even the right fork? */
485 if (rec->rm_owner != sc->ip->i_ino)
/* XOR: skip records whose attr-fork flag disagrees with our fork. */
487 if ((sbcri->whichfork == XFS_ATTR_FORK) ^
488 !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
/* bmbt blocks themselves are checked elsewhere, not as file data. */
490 if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
493 /* Now look up the bmbt record. */
494 ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork)
496 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
500 have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
501 &sbcri->icur, &irec);
/* No bmap extent at this rmapped offset => the fork lost a mapping. */
503 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
506 * bmap extent record lengths are constrained to 2^21 blocks in length
507 * because of space constraints in the on-disk metadata structure.
508 * However, rmap extent record lengths are constrained only by AG
509 * length, so we have to loop through the bmbt to make sure that the
510 * entire rmap is covered by bmbt records.
513 if (irec.br_startoff != rec->rm_offset)
514 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
516 if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
517 cur->bc_ag.agno, rec->rm_startblock))
518 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
520 if (irec.br_blockcount > rec->rm_blockcount)
521 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
522 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
/* Consume the matched portion of the rmap and advance to the rest. */
525 rec->rm_startblock += irec.br_blockcount;
526 rec->rm_offset += irec.br_blockcount;
527 rec->rm_blockcount -= irec.br_blockcount;
528 if (rec->rm_blockcount == 0)
530 have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
/* Ran out of bmap extents before covering the whole rmap. */
532 xchk_fblock_set_corrupt(sc, sbcri->whichfork,
537 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
542 /* Make sure each rmap has a corresponding bmbt entry. */
/*
 * Walk one AG's rmap btree, calling xchk_bmap_check_rmap() for every
 * record; -ECANCELED from the callback means "corruption already flagged,
 * stop early" and is presumably squashed to 0 upstream -- confirm.
 *
 * NOTE(review): parameters (@whichfork, @agno), the agf buffer declaration,
 * error checks and the return are missing from this extraction.
 */
544 xchk_bmap_check_ag_rmaps(
545 struct xfs_scrub *sc,
549 struct xchk_bmap_check_rmap_info sbcri;
550 struct xfs_btree_cur *cur;
/* Read the AGF header so we can build an rmapbt cursor for this AG. */
554 error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
558 cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
561 sbcri.whichfork = whichfork;
562 error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
563 if (error == -ECANCELED)
/* Tear down the cursor and release the AGF buffer in either case. */
566 xfs_btree_del_cursor(cur, error);
567 xfs_trans_brelse(sc->tp, agf);
571 /* Make sure each rmap has a corresponding bmbt entry. */
/*
 * Decide whether the (expensive) full-AG rmap cross-check is warranted,
 * then run it over every AG.  Skipped when there is no rmap btree, for the
 * CoW fork, when corruption was already flagged, and for realtime data
 * forks (no rt rmapbt yet).  Otherwise only btree-format forks -- or forks
 * that suspiciously show a size but no extents -- get the scan, since
 * inode repair can zap broken iforks.
 *
 * NOTE(review): the @whichfork parameter, zero_size/agno/error locals, the
 * attr-fork branch of the zero_size computation (line 606 implies one),
 * loop error checks and returns are missing from this extraction.
 */
573 xchk_bmap_check_rmaps(
574 struct xfs_scrub *sc,
577 struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
582 if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
583 whichfork == XFS_COW_FORK ||
584 (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
587 /* Don't support realtime rmap checks yet. */
588 if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
591 ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);
594 * Only do this for complex maps that are in btree format, or for
595 * situations where we would seem to have a size but zero extents.
596 * The inode repair code can zap broken iforks, which means we have
597 * to flag this bmap as corrupt if there are rmaps that need to be
601 if (whichfork == XFS_DATA_FORK)
602 zero_size = i_size_read(VFS_I(sc->ip)) == 0;
/* Non-btree fork with extents (or nonzero size) needs no rmap scan. */
606 if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
607 (zero_size || ifp->if_nextents > 0))
/* Cross-check this fork against every AG's rmap btree. */
610 for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
611 error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
614 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
622 * Scrub an inode fork's block mappings.
624 * First we scan every record in every btree block, if applicable.
625 * Then we unconditionally scan the incore extent cache.
/*
 * Main entry point for scrubbing one fork (@whichfork) of the inode under
 * scrub: validate fork-level invariants (CoW fork only on reflink inodes,
 * attr fork only with the attr feature bits, fork format sanity), walk the
 * on-disk bmbt if the fork is btree-format, then iterate every incore
 * extent through xchk_bmap_iextent(), and finally cross-check against the
 * rmap btrees via xchk_bmap_check_rmaps().
 *
 * NOTE(review): the @whichfork parameter line, the switch's case labels for
 * the fork-validation block (lines 652-664 read like XFS_COW_FORK /
 * XFS_ATTR_FORK / default cases), several break/goto/out lines and the
 * final return are missing from this extraction.
 */
629 struct xfs_scrub *sc,
632 struct xfs_bmbt_irec irec;
633 struct xchk_bmap_info info = { NULL };
634 struct xfs_mount *mp = sc->mp;
635 struct xfs_inode *ip = sc->ip;
636 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
637 xfs_fileoff_t endoff;
638 struct xfs_iext_cursor icur;
641 /* Non-existent forks can be ignored. */
/* Realtime only applies to the data fork; shared only to reflink data. */
645 info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
646 info.whichfork = whichfork;
647 info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
652 /* No CoW forks on non-reflink inodes/filesystems. */
653 if (!xfs_is_reflink_inode(ip)) {
654 xchk_ino_set_corrupt(sc, sc->ip->i_ino);
/* An attr fork requires one of the attr feature bits to be set. */
659 if (!xfs_sb_version_hasattr(&mp->m_sb) &&
660 !xfs_sb_version_hasattr2(&mp->m_sb))
661 xchk_ino_set_corrupt(sc, sc->ip->i_ino);
664 ASSERT(whichfork == XFS_DATA_FORK);
668 /* Check the fork values */
669 switch (ifp->if_format) {
670 case XFS_DINODE_FMT_UUID:
671 case XFS_DINODE_FMT_DEV:
672 case XFS_DINODE_FMT_LOCAL:
673 /* No mappings to check. */
675 case XFS_DINODE_FMT_EXTENTS:
/* Extent-format forks must already have their extents in core. */
676 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
677 xchk_fblock_set_corrupt(sc, whichfork, 0);
681 case XFS_DINODE_FMT_BTREE:
/* The CoW fork is incore-only and can never be in btree format. */
682 if (whichfork == XFS_COW_FORK) {
683 xchk_fblock_set_corrupt(sc, whichfork, 0);
687 error = xchk_bmap_btree(sc, whichfork, &info);
/* Presumably the switch's default case: unknown fork format. */
692 xchk_fblock_set_corrupt(sc, whichfork, 0);
696 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
699 /* Find the offset of the last extent in the mapping. */
700 error = xfs_bmap_last_offset(ip, &endoff, whichfork);
701 if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
704 /* Scrub extent records. */
706 ifp = XFS_IFORK_PTR(ip, whichfork);
707 for_each_xfs_iext(ifp, &icur, &irec) {
/* Bail on fatal signal or once corruption has been flagged. */
708 if (xchk_should_terminate(sc, &error) ||
709 (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
/* Delalloc reservations are skipped; they have no physical mapping. */
711 if (isnullstartblock(irec.br_startblock))
/* No extent may start at or past the last mapped offset. */
713 if (irec.br_startoff >= endoff) {
714 xchk_fblock_set_corrupt(sc, whichfork,
718 error = xchk_bmap_iextent(ip, &info, &irec);
723 error = xchk_bmap_check_rmaps(sc, whichfork);
724 if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
730 /* Scrub an inode's data fork. */
/* Thin wrapper: run the fork scrubber on XFS_DATA_FORK.
 * NOTE(review): the function name line and braces are missing here. */
733 struct xfs_scrub *sc)
735 return xchk_bmap(sc, XFS_DATA_FORK);
738 /* Scrub an inode's attr fork. */
/* Thin wrapper: run the fork scrubber on XFS_ATTR_FORK.
 * NOTE(review): the function name line and braces are missing here. */
741 struct xfs_scrub *sc)
743 return xchk_bmap(sc, XFS_ATTR_FORK);
746 /* Scrub an inode's CoW fork. */
/*
 * Thin wrapper: scrub XFS_COW_FORK, but only for reflink inodes -- others
 * have no CoW fork.  NOTE(review): the function name line, braces and the
 * non-reflink early-return value are missing from this extraction.
 */
749 struct xfs_scrub *sc)
/* Non-reflink inodes have nothing to do here. */
751 if (!xfs_is_reflink_inode(sc->ip))
754 return xchk_bmap(sc, XFS_COW_FORK);