1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_btree.h"
13 #include "xfs_log_format.h"
14 #include "xfs_trans.h"
16 #include "xfs_alloc.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_ialloc.h"
19 #include "xfs_ialloc_btree.h"
21 #include "xfs_rmap_btree.h"
22 #include "xfs_refcount_btree.h"
23 #include "scrub/scrub.h"
24 #include "scrub/common.h"
25 #include "scrub/trace.h"
26 #include "scrub/repair.h"
27 #include "scrub/bitmap.h"
31 /* Repair the superblock. */
/*
 * Rewrite a damaged secondary superblock by serializing AG 0's in-core
 * superblock (mp->m_sb) over a zeroed buffer for the target AG, then logging
 * the whole buffer so the transaction commits the repair.
 *
 * NOTE(review): extraction gaps in this chunk -- the function signature,
 * local declarations (agno, bp, error), the AG-0 bailout branch, the error
 * checks after xfs_sb_get_secondary, and the final return are not visible
 * here.  Confirm against the full file before relying on this fragment.
 */
36 struct xfs_mount *mp = sc->mp;
41 /* Don't try to repair AG 0's sb; let xfs_repair deal with it. */
42 agno = sc->sm->sm_agno;
/* Read (or fabricate) the buffer for this AG's secondary superblock. */
46 error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
50 /* Copy AG 0's superblock to this one. */
51 xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
52 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
54 /* Write this to disk. */
55 xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
56 xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
/*
 * Accumulator passed through the bnobt record walk: total free blocks seen
 * and the longest single free extent, used below to fill agf_freeblks and
 * agf_longest.
 *
 * NOTE(review): extraction gap -- a "struct xfs_scrub *sc;" member and the
 * closing "};" are not visible here (raa->sc is dereferenced in the walk
 * callback, so the member must exist in the full file).
 */
62 struct xrep_agf_allocbt {
64 xfs_agblock_t freeblks; /* running total of rec->ar_blockcount */
65 xfs_agblock_t longest; /* largest single free extent seen so far */
68 /* Record free space shape information. */
/*
 * bnobt query callback: accumulate the total free block count and track the
 * longest free extent in the xrep_agf_allocbt accumulator.
 *
 * NOTE(review): extraction gaps -- the return-type declarator, the "void
 * *priv" parameter, the local error variable, the body of the
 * xchk_should_terminate() early-out, and the final "return 0;" are not
 * visible in this chunk.
 */
70 xrep_agf_walk_allocbt(
71 struct xfs_btree_cur *cur,
72 struct xfs_alloc_rec_incore *rec,
75 struct xrep_agf_allocbt *raa = priv;
/* Bail out early if the scrub operation was asked to stop. */
78 if (xchk_should_terminate(raa->sc, &error))
81 raa->freeblks += rec->ar_blockcount;
82 if (rec->ar_blockcount > raa->longest)
83 raa->longest = rec->ar_blockcount;
87 /* Does this AGFL block look sane? */
/*
 * AGFL walk callback: reject an AGFL entry whose AG block number is invalid
 * for this AG.
 *
 * NOTE(review): extraction gaps -- the declarator, remaining parameters
 * (mp/agbno appear to be parameters or locals), and both return statements
 * are not visible in this chunk.
 */
89 xrep_agf_check_agfl_block(
94 struct xfs_scrub *sc = priv;
96 if (!xfs_verify_agbno(mp, sc->sa.agno, agbno))
102 * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
103 * XFS_BTNUM_ names here to avoid creating a sparse array.
 *
 * NOTE(review): the enum this comment describes (XREP_AGF_BNOBT,
 * XREP_AGF_CNTBT, XREP_AGF_RMAPBT, XREP_AGF_REFCOUNTBT, XREP_AGF_MAX) is not
 * visible in this extraction, although its values are used below.
114 /* Check a btree root candidate. */
/*
 * Return true iff the candidate root is a valid block number within this AG
 * and the recorded tree height does not exceed XFS_BTREE_MAXLEVELS.
 *
 * NOTE(review): the return-type declarator line (and closing brace) are not
 * visible in this extraction.
 */
116 xrep_check_btree_root(
117 struct xfs_scrub *sc,
118 struct xrep_find_ag_btree *fab)
120 struct xfs_mount *mp = sc->mp;
121 xfs_agnumber_t agno = sc->sm->sm_agno;
123 return xfs_verify_agbno(mp, agno, fab->root) &&
124 fab->height <= XFS_BTREE_MAXLEVELS;
128 * Given the btree roots described by *fab, find the roots, check them for
129 * sanity, and pass the root data back out via *fab.
131 * This is /also/ a chicken and egg problem because we have to use the rmapbt
132 * (rooted in the AGF) to find the btrees rooted in the AGF. We also have no
133 * idea if the btrees make any sense. If we hit obvious corruptions in those
134 * btrees we'll bail out.
/*
 * NOTE(review): extraction gaps -- the declarator, the local "int error"
 * declaration, the "if (error) return error;" after the root search, the
 * comment opener before "We relied on the rmapbt", and the trailing
 * "return 0;" plus closing brace are not visible in this chunk.
 */
137 xrep_agf_find_btrees(
138 struct xfs_scrub *sc,
139 struct xfs_buf *agf_bp,
140 struct xrep_find_ag_btree *fab,
141 struct xfs_buf *agfl_bp)
143 struct xfs_agf *old_agf = XFS_BUF_TO_AGF(agf_bp);
146 /* Go find the root data. */
147 error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
151 /* We must find the bnobt, cntbt, and rmapbt roots. */
152 if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
153 !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
154 !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
155 return -EFSCORRUPTED;
158 * We relied on the rmapbt to reconstruct the AGF. If we get a
159 * different root then something's seriously wrong.
161 if (fab[XREP_AGF_RMAPBT].root !=
162 be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
163 return -EFSCORRUPTED;
165 /* We must find the refcountbt root if that feature is enabled. */
166 if (xfs_sb_version_hasreflink(&sc->mp->m_sb) &&
167 !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
168 return -EFSCORRUPTED;
174 * Reinitialize the AGF header, making an in-core copy of the old contents so
175 * that we know which in-core state needs to be reinitialized.
/*
 * NOTE(review): extraction gaps -- the declarator line and the closing brace
 * are not visible in this chunk.
 */
178 xrep_agf_init_header(
179 struct xfs_scrub *sc,
180 struct xfs_buf *agf_bp,
181 struct xfs_agf *old_agf)
183 struct xfs_mount *mp = sc->mp;
184 struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
/* Snapshot the old header so the caller can roll back on failure. */
186 memcpy(old_agf, agf, sizeof(*old_agf));
187 memset(agf, 0, BBTOB(agf_bp->b_length));
188 agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
189 agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
190 agf->agf_seqno = cpu_to_be32(sc->sa.agno);
191 agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
/* Preserve the free list geometry; the AGFL is repaired separately. */
192 agf->agf_flfirst = old_agf->agf_flfirst;
193 agf->agf_fllast = old_agf->agf_fllast;
194 agf->agf_flcount = old_agf->agf_flcount;
195 if (xfs_sb_version_hascrc(&mp->m_sb))
196 uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
198 /* Mark the incore AGF data stale until we're done fixing things. */
199 ASSERT(sc->sa.pag->pagf_init);
200 sc->sa.pag->pagf_init = 0;
203 /* Set btree root information in an AGF. */
/*
 * Copy the root block numbers and tree heights discovered by
 * xrep_agf_find_btrees() from *fab into the on-disk AGF fields.
 *
 * NOTE(review): extraction gaps -- the declarator, the "struct xfs_agf *agf"
 * parameter line, and the closing braces are not visible in this chunk.
 */
206 struct xfs_scrub *sc,
208 struct xrep_find_ag_btree *fab)
210 agf->agf_roots[XFS_BTNUM_BNOi] =
211 cpu_to_be32(fab[XREP_AGF_BNOBT].root);
212 agf->agf_levels[XFS_BTNUM_BNOi] =
213 cpu_to_be32(fab[XREP_AGF_BNOBT].height);
215 agf->agf_roots[XFS_BTNUM_CNTi] =
216 cpu_to_be32(fab[XREP_AGF_CNTBT].root);
217 agf->agf_levels[XFS_BTNUM_CNTi] =
218 cpu_to_be32(fab[XREP_AGF_CNTBT].height);
220 agf->agf_roots[XFS_BTNUM_RMAPi] =
221 cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
222 agf->agf_levels[XFS_BTNUM_RMAPi] =
223 cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
/* The refcountbt root is only present on reflink filesystems. */
225 if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
226 agf->agf_refcount_root =
227 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
228 agf->agf_refcount_level =
229 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
233 /* Update all AGF fields which derive from btree contents. */
/*
 * Walk the bnobt/cntbt/rmapbt (and refcountbt if present) to recompute the
 * derived AGF counters: freeblks, longest, btreeblks, rmap_blocks, and
 * refcount_blocks.
 *
 * NOTE(review): extraction gaps -- the declarator, the "int error"
 * declaration, the trailing btnum argument on each truncated
 * *_init_cursor() call, every "if (error) goto ..." check, the error label,
 * and the "return 0;"/"return error;" tail are not visible in this chunk.
 */
235 xrep_agf_calc_from_btrees(
236 struct xfs_scrub *sc,
237 struct xfs_buf *agf_bp)
239 struct xrep_agf_allocbt raa = { .sc = sc };
240 struct xfs_btree_cur *cur = NULL;
241 struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
242 struct xfs_mount *mp = sc->mp;
243 xfs_agblock_t btreeblks;
244 xfs_agblock_t blocks;
247 /* Update the AGF counters from the bnobt. */
248 cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
250 error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
253 error = xfs_btree_count_blocks(cur, &blocks);
256 xfs_btree_del_cursor(cur, error);
/*
 * NOTE(review): the "- 1" apparently excludes each btree's root block from
 * the btreeblks total -- confirm against the agf_btreeblks semantics.
 */
257 btreeblks = blocks - 1;
258 agf->agf_freeblks = cpu_to_be32(raa.freeblks);
259 agf->agf_longest = cpu_to_be32(raa.longest);
261 /* Update the AGF counters from the cntbt. */
262 cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
264 error = xfs_btree_count_blocks(cur, &blocks);
267 xfs_btree_del_cursor(cur, error);
268 btreeblks += blocks - 1;
270 /* Update the AGF counters from the rmapbt. */
271 cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
272 error = xfs_btree_count_blocks(cur, &blocks);
275 xfs_btree_del_cursor(cur, error);
276 agf->agf_rmap_blocks = cpu_to_be32(blocks);
277 btreeblks += blocks - 1;
279 agf->agf_btreeblks = cpu_to_be32(btreeblks);
281 /* Update the AGF counters from the refcountbt. */
282 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
283 cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
285 error = xfs_btree_count_blocks(cur, &blocks);
288 xfs_btree_del_cursor(cur, error);
289 agf->agf_refcount_blocks = cpu_to_be32(blocks);
/* Error path: tear down whichever cursor was live when we failed. */
294 xfs_btree_del_cursor(cur, error);
298 /* Commit the new AGF and reinitialize the incore state. */
/*
 * Log the rebuilt AGF and refresh the per-AG in-core counters from the new
 * on-disk values.
 *
 * NOTE(review): extraction gaps -- the declarator and function-name lines,
 * the "pag = sc->sa.pag;" assignment the counter updates rely on, the line
 * re-marking the incore data valid, and the return are not visible here.
 */
301 struct xfs_scrub *sc,
302 struct xfs_buf *agf_bp)
304 struct xfs_perag *pag;
305 struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
307 /* Trigger fdblocks recalculation */
308 xfs_force_summary_recalc(sc->mp);
310 /* Write this to disk. */
311 xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
312 xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);
314 /* Now reinitialize the in-core counters we changed. */
316 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
317 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
318 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
319 pag->pagf_levels[XFS_BTNUM_BNOi] =
320 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
321 pag->pagf_levels[XFS_BTNUM_CNTi] =
322 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
323 pag->pagf_levels[XFS_BTNUM_RMAPi] =
324 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
325 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
331 /* Repair the AGF. v5 filesystems only. */
/*
 * Top-level AGF repair: read the (possibly corrupt) AGF and AGFL buffers,
 * spot-check the AGFL, locate the AGF's btree roots via the rmapbt, rewrite
 * the header, implant the roots, recompute derived counters, then commit.
 * On failure after the header rewrite, the incore state is marked stale and
 * the saved old AGF contents are restored.
 *
 * NOTE(review): extraction gaps -- the function declarator, the BNOBT/CNTBT
 * array-designator opener lines in fab[], any additional fab fields, local
 * declarations (agf, error), the -EOPNOTSUPP bailout body, every
 * "if (error)" check, the error label before the revert code, and the final
 * return are not visible in this chunk.
 */
334 struct xfs_scrub *sc)
336 struct xrep_find_ag_btree fab[XREP_AGF_MAX] = {
338 .rmap_owner = XFS_RMAP_OWN_AG,
339 .buf_ops = &xfs_bnobt_buf_ops,
342 .rmap_owner = XFS_RMAP_OWN_AG,
343 .buf_ops = &xfs_cntbt_buf_ops,
345 [XREP_AGF_RMAPBT] = {
346 .rmap_owner = XFS_RMAP_OWN_AG,
347 .buf_ops = &xfs_rmapbt_buf_ops,
349 [XREP_AGF_REFCOUNTBT] = {
350 .rmap_owner = XFS_RMAP_OWN_REFC,
351 .buf_ops = &xfs_refcountbt_buf_ops,
357 struct xfs_agf old_agf;
358 struct xfs_mount *mp = sc->mp;
359 struct xfs_buf *agf_bp;
360 struct xfs_buf *agfl_bp;
364 /* We require the rmapbt to rebuild anything. */
365 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
368 xchk_perag_get(sc->mp, &sc->sa);
370 * Make sure we have the AGF buffer, as scrub might have decided it
371 * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
373 error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
374 XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGF_DADDR(mp)),
375 XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
378 agf_bp->b_ops = &xfs_agf_buf_ops;
379 agf = XFS_BUF_TO_AGF(agf_bp);
382 * Load the AGFL so that we can screen out OWN_AG blocks that are on
383 * the AGFL now; these blocks might have once been part of the
384 * bno/cnt/rmap btrees but are not now. This is a chicken and egg
385 * problem: the AGF is corrupt, so we have to trust the AGFL contents
386 * because we can't do any serious cross-referencing with any of the
387 * btrees rooted in the AGF. If the AGFL contents are obviously bad
388 * then we'll bail out.
390 error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.agno, &agfl_bp);
395 * Spot-check the AGFL blocks; if they're obviously corrupt then
396 * there's nothing we can do but bail out.
398 error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(agf_bp), agfl_bp,
399 xrep_agf_check_agfl_block, sc);
404 * Find the AGF btree roots. This is also a chicken-and-egg situation;
405 * see the function for more details.
407 error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
411 /* Start rewriting the header and implant the btrees we found. */
412 xrep_agf_init_header(sc, agf_bp, &old_agf);
413 xrep_agf_set_roots(sc, agf, fab);
414 error = xrep_agf_calc_from_btrees(sc, agf_bp);
418 /* Commit the changes and reinitialize incore state. */
419 return xrep_agf_commit_new(sc, agf_bp);
422 /* Mark the incore AGF state stale and revert the AGF. */
423 sc->sa.pag->pagf_init = 0;
424 memcpy(agf, &old_agf, sizeof(old_agf));
/*
 * Context for deducing the old AGFL contents from the rmap data.
 *
 * NOTE(review): extraction gap -- the "struct xrep_agfl {" opener and the
 * closing "};" are not visible in this chunk.
 */
431 /* Bitmap of other OWN_AG metadata blocks. */
432 struct xfs_bitmap agmetablocks;
434 /* Bitmap of free space. */
435 struct xfs_bitmap *freesp;
437 struct xfs_scrub *sc;
440 /* Record all OWN_AG (free space btree) information from the rmap data. */
/*
 * rmapbt query callback: record every OWN_AG extent in ra->freesp, and
 * record the btree path blocks leading to this record in ra->agmetablocks
 * so they can later be subtracted out.
 *
 * NOTE(review): extraction gaps -- the declarator and function-name lines,
 * the "void *priv" parameter, local declarations (fsb, error), the early
 * return for xchk_should_terminate, the second XFS_AGB_TO_FSB argument
 * line, and the error check after xfs_bitmap_set are not visible here.
 */
443 struct xfs_btree_cur *cur,
444 struct xfs_rmap_irec *rec,
447 struct xrep_agfl *ra = priv;
451 if (xchk_should_terminate(ra->sc, &error))
454 /* Record all the OWN_AG blocks. */
455 if (rec->rm_owner == XFS_RMAP_OWN_AG) {
456 fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
458 error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
463 return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
467 * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
468 * which blocks belong to the AGFL.
470 * Compute the set of old AGFL blocks by subtracting from the list of OWN_AG
471 * blocks the list of blocks owned by all other OWN_AG metadata (bnobt, cntbt,
472 * rmapbt). These are the old AGFL blocks, so return that list and the number
473 * of blocks we're actually going to put back on the AGFL.
/*
 * NOTE(review): extraction gaps -- the declarator, the "struct xrep_agfl ra"
 * local, the "int error" declaration, the btnum argument on each truncated
 * allocbt cursor call, every "if (error) goto ..." check, the "*flcount"
 * initialization and per-extent accumulation inside the loop, the loop's
 * early break, the error label, and the returns are not visible here.
 */
476 xrep_agfl_collect_blocks(
477 struct xfs_scrub *sc,
478 struct xfs_buf *agf_bp,
479 struct xfs_bitmap *agfl_extents,
480 xfs_agblock_t *flcount)
483 struct xfs_mount *mp = sc->mp;
484 struct xfs_btree_cur *cur;
485 struct xfs_bitmap_range *br;
486 struct xfs_bitmap_range *n;
490 ra.freesp = agfl_extents;
491 xfs_bitmap_init(&ra.agmetablocks);
493 /* Find all space used by the free space btrees & rmapbt. */
494 cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
495 error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
498 xfs_btree_del_cursor(cur, error);
500 /* Find all blocks currently being used by the bnobt. */
501 cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
503 error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
506 xfs_btree_del_cursor(cur, error);
508 /* Find all blocks currently being used by the cntbt. */
509 cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
511 error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
515 xfs_btree_del_cursor(cur, error);
518 * Drop the freesp meta blocks that are in use by btrees.
519 * The remaining blocks /should/ be AGFL blocks.
521 error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
522 xfs_bitmap_destroy(&ra.agmetablocks);
527 * Calculate the new AGFL size. If we found more blocks than fit in
528 * the AGFL we'll free them later.
531 for_each_xfs_bitmap_extent(br, n, agfl_extents) {
533 if (*flcount > xfs_agfl_size(mp))
/* Clamp the count to the on-disk AGFL capacity. */
536 if (*flcount > xfs_agfl_size(mp))
537 *flcount = xfs_agfl_size(mp);
/* Error path: release the scratch bitmap and any live cursor. */
541 xfs_bitmap_destroy(&ra.agmetablocks);
542 xfs_btree_del_cursor(cur, error);
546 /* Update the AGF and reset the in-core state. */
/*
 * Point the AGF's free list fields at the freshly compacted AGFL: the list
 * now starts at slot 0 and holds flcount entries, so fllast is flcount - 1.
 *
 * NOTE(review): the return-type declarator and closing brace are not
 * visible in this extraction.
 */
548 xrep_agfl_update_agf(
549 struct xfs_scrub *sc,
550 struct xfs_buf *agf_bp,
551 xfs_agblock_t flcount)
553 struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
555 ASSERT(flcount <= xfs_agfl_size(sc->mp));
557 /* Trigger fdblocks recalculation */
558 xfs_force_summary_recalc(sc->mp);
560 /* Update the AGF counters. */
561 if (sc->sa.pag->pagf_init)
562 sc->sa.pag->pagf_flcount = flcount;
563 agf->agf_flfirst = cpu_to_be32(0);
564 agf->agf_flcount = cpu_to_be32(flcount);
565 agf->agf_fllast = cpu_to_be32(flcount - 1);
567 xfs_alloc_log_agf(sc->tp, agf_bp,
568 XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
571 /* Write out a totally new AGFL. */
/*
 * Rebuild the on-disk AGFL: reset every bno[] slot to NULLAGBLOCK
 * (memset 0xFF), write fresh header fields, then pack the collected
 * extents into the first flcount slots and log the buffer.
 *
 * NOTE(review): extraction gaps -- the declarator, local declarations
 * (agfl_bno, agbno, fl_off and their types), the inner-loop increments
 * that advance agbno/fl_off and shrink the current extent, the post-loop
 * trimming of consumed blocks from agfl_extents, and the closing braces
 * are not visible in this chunk.
 */
573 xrep_agfl_init_header(
574 struct xfs_scrub *sc,
575 struct xfs_buf *agfl_bp,
576 struct xfs_bitmap *agfl_extents,
577 xfs_agblock_t flcount)
579 struct xfs_mount *mp = sc->mp;
581 struct xfs_bitmap_range *br;
582 struct xfs_bitmap_range *n;
583 struct xfs_agfl *agfl;
587 ASSERT(flcount <= xfs_agfl_size(mp));
590 * Start rewriting the header by setting the bno[] array to
591 * NULLAGBLOCK, then setting AGFL header fields.
593 agfl = XFS_BUF_TO_AGFL(agfl_bp);
594 memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
595 agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
596 agfl->agfl_seqno = cpu_to_be32(sc->sa.agno);
597 uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
600 * Fill the AGFL with the remaining blocks. If agfl_extents has more
601 * blocks than fit in the AGFL, they will be freed in a subsequent
605 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
606 for_each_xfs_bitmap_extent(br, n, agfl_extents) {
607 agbno = XFS_FSB_TO_AGBNO(mp, br->start);
609 trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
611 while (br->len > 0 && fl_off < flcount) {
612 agfl_bno[fl_off] = cpu_to_be32(agbno);
617 * We've now used br->start by putting it in the AGFL,
618 * so bump br so that we don't reap the block later.
630 /* Write new AGFL to disk. */
631 xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
632 xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
635 /* Repair the AGFL. */
/*
 * Top-level AGFL repair: collect candidate AGFL blocks from the rmap data,
 * update the AGF's free list fields, write a brand-new AGFL, roll the
 * transaction so it sticks, then reap any overflow blocks back into the
 * free space btrees.
 *
 * NOTE(review): extraction gaps -- the function declarator, the "int error"
 * declaration, the -EOPNOTSUPP bailout body, every "if (error) goto ..."
 * check, the final xrep_reap_extents() argument line, the error label
 * preceding the bitmap destroy, and the returns are not visible here.
 */
638 struct xfs_scrub *sc)
640 struct xfs_bitmap agfl_extents;
641 struct xfs_mount *mp = sc->mp;
642 struct xfs_buf *agf_bp;
643 struct xfs_buf *agfl_bp;
644 xfs_agblock_t flcount;
647 /* We require the rmapbt to rebuild anything. */
648 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
651 xchk_perag_get(sc->mp, &sc->sa);
652 xfs_bitmap_init(&agfl_extents);
655 * Read the AGF so that we can query the rmapbt. We hope that there's
656 * nothing wrong with the AGF, but all the AG header repair functions
657 * have this chicken-and-egg problem.
659 error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
666 * Make sure we have the AGFL buffer, as scrub might have decided it
667 * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
669 error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
670 XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGFL_DADDR(mp)),
671 XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
674 agfl_bp->b_ops = &xfs_agfl_buf_ops;
676 /* Gather all the extents we're going to put on the new AGFL. */
677 error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
682 * Update AGF and AGFL. We reset the global free block counter when
683 * we adjust the AGF flcount (which can fail) so avoid updating any
684 * buffers until we know that part works.
686 xrep_agfl_update_agf(sc, agf_bp, flcount);
687 xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);
690 * Ok, the AGFL should be ready to go now. Roll the transaction to
691 * make the new AGFL permanent before we start using it to return
692 * freespace overflow to the freespace btrees.
694 sc->sa.agf_bp = agf_bp;
695 sc->sa.agfl_bp = agfl_bp;
696 error = xrep_roll_ag_trans(sc);
700 /* Dump any AGFL overflow. */
701 return xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
704 xfs_bitmap_destroy(&agfl_extents);
711 * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
712 * XFS_BTNUM_ names here to avoid creating a sparse array.
 *
 * NOTE(review): the enum this comment describes (XREP_AGI_INOBT,
 * XREP_AGI_FINOBT, XREP_AGI_MAX) is not visible in this extraction,
 * although its values are used below.
722 * Given the inode btree roots described by *fab, find the roots, check them
723 * for sanity, and pass the root data back out via *fab.
/*
 * NOTE(review): extraction gaps -- the declarator, the "int error"
 * declaration, the error/NULL checks after xfs_alloc_read_agf and after the
 * root search, and the trailing "return 0;" plus closing brace are not
 * visible in this chunk.
 */
726 xrep_agi_find_btrees(
727 struct xfs_scrub *sc,
728 struct xrep_find_ag_btree *fab)
730 struct xfs_buf *agf_bp;
731 struct xfs_mount *mp = sc->mp;
/* The root search walks the rmapbt, which is rooted in the AGF. */
735 error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
741 /* Find the btree roots. */
742 error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
746 /* We must find the inobt root. */
747 if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
748 return -EFSCORRUPTED;
750 /* We must find the finobt root if that feature is enabled. */
751 if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
752 !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
753 return -EFSCORRUPTED;
759 * Reinitialize the AGI header, making an in-core copy of the old contents so
760 * that we know which in-core state needs to be reinitialized.
/*
 * NOTE(review): extraction gaps -- the declarator line and the closing brace
 * are not visible in this chunk.
 */
763 xrep_agi_init_header(
764 struct xfs_scrub *sc,
765 struct xfs_buf *agi_bp,
766 struct xfs_agi *old_agi)
768 struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
769 struct xfs_mount *mp = sc->mp;
/* Snapshot the old header so the caller can roll back on failure. */
771 memcpy(old_agi, agi, sizeof(*old_agi));
772 memset(agi, 0, BBTOB(agi_bp->b_length));
773 agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
774 agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
775 agi->agi_seqno = cpu_to_be32(sc->sa.agno);
776 agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
777 agi->agi_newino = cpu_to_be32(NULLAGINO);
778 agi->agi_dirino = cpu_to_be32(NULLAGINO);
779 if (xfs_sb_version_hascrc(&mp->m_sb))
780 uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
782 /* We don't know how to fix the unlinked list yet. */
783 memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
784 sizeof(agi->agi_unlinked));
786 /* Mark the incore AGI data stale until we're done fixing things. */
787 ASSERT(sc->sa.pag->pagi_init);
788 sc->sa.pag->pagi_init = 0;
791 /* Set btree root information in an AGI. */
/*
 * Copy the inobt (and finobt, when enabled) root block numbers and heights
 * from *fab into the on-disk AGI fields.
 *
 * NOTE(review): extraction gaps -- the declarator, the "struct xfs_agi *agi"
 * parameter line, and the closing braces are not visible in this chunk.
 */
794 struct xfs_scrub *sc,
796 struct xrep_find_ag_btree *fab)
798 agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
799 agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
801 if (xfs_sb_version_hasfinobt(&sc->mp->m_sb)) {
802 agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
803 agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
807 /* Update the AGI counters. */
/*
 * Recompute agi_count/agi_freecount by counting inodes in the inobt.
 *
 * NOTE(review): extraction gaps -- the declarator, the "count" local (paired
 * with freecount), the "int error" declaration, the inobt cursor's trailing
 * btnum argument, the "if (error) goto ..." check, the error label, and the
 * returns are not visible in this chunk.
 */
809 xrep_agi_calc_from_btrees(
810 struct xfs_scrub *sc,
811 struct xfs_buf *agi_bp)
813 struct xfs_btree_cur *cur;
814 struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
815 struct xfs_mount *mp = sc->mp;
817 xfs_agino_t freecount;
820 cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
822 error = xfs_ialloc_count_inodes(cur, &count, &freecount);
825 xfs_btree_del_cursor(cur, error);
827 agi->agi_count = cpu_to_be32(count);
828 agi->agi_freecount = cpu_to_be32(freecount);
/* Error path: tear down the cursor before returning. */
831 xfs_btree_del_cursor(cur, error);
835 /* Trigger reinitialization of the in-core data. */
/*
 * Log the rebuilt AGI and refresh the per-AG in-core inode counters from
 * the new on-disk values.
 *
 * NOTE(review): extraction gaps -- the declarator and function-name lines,
 * the "pag = sc->sa.pag;" assignment the counter updates rely on, the line
 * re-marking the incore data valid, and the return are not visible here.
 */
838 struct xfs_scrub *sc,
839 struct xfs_buf *agi_bp)
841 struct xfs_perag *pag;
842 struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
844 /* Trigger inode count recalculation */
845 xfs_force_summary_recalc(sc->mp);
847 /* Write this to disk. */
848 xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
849 xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
851 /* Now reinitialize the in-core counters if necessary. */
853 pag->pagi_count = be32_to_cpu(agi->agi_count);
854 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
860 /* Repair the AGI. */
/*
 * Top-level AGI repair: read the (possibly corrupt) AGI buffer, locate the
 * inode btree roots via the rmapbt, rewrite the header, implant the roots,
 * recount inodes, then commit.  On failure after the header rewrite, the
 * incore state is marked stale and the saved old AGI contents are restored.
 *
 * NOTE(review): extraction gaps -- the function declarator, the INOBT
 * array-designator opener line in fab[], any additional fab fields, local
 * declarations (agi, error), the -EOPNOTSUPP bailout body, every
 * "if (error)" check, the error label before the revert code, and the final
 * return are not visible in this chunk.  Both fab entries use
 * XFS_RMAP_OWN_INOBT, which appears intentional -- inobt and finobt blocks
 * share that rmap owner; confirm against the full source.
 */
863 struct xfs_scrub *sc)
865 struct xrep_find_ag_btree fab[XREP_AGI_MAX] = {
867 .rmap_owner = XFS_RMAP_OWN_INOBT,
868 .buf_ops = &xfs_inobt_buf_ops,
870 [XREP_AGI_FINOBT] = {
871 .rmap_owner = XFS_RMAP_OWN_INOBT,
872 .buf_ops = &xfs_finobt_buf_ops,
878 struct xfs_agi old_agi;
879 struct xfs_mount *mp = sc->mp;
880 struct xfs_buf *agi_bp;
884 /* We require the rmapbt to rebuild anything. */
885 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
888 xchk_perag_get(sc->mp, &sc->sa);
890 * Make sure we have the AGI buffer, as scrub might have decided it
891 * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
893 error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
894 XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGI_DADDR(mp)),
895 XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
898 agi_bp->b_ops = &xfs_agi_buf_ops;
899 agi = XFS_BUF_TO_AGI(agi_bp);
901 /* Find the AGI btree roots. */
902 error = xrep_agi_find_btrees(sc, fab);
906 /* Start rewriting the header and implant the btrees we found. */
907 xrep_agi_init_header(sc, agi_bp, &old_agi);
908 xrep_agi_set_roots(sc, agi, fab);
909 error = xrep_agi_calc_from_btrees(sc, agi_bp);
913 /* Reinitialize in-core state. */
914 return xrep_agi_commit_new(sc, agi_bp);
917 /* Mark the incore AGI state stale and revert the AGI. */
918 sc->sa.pag->pagi_init = 0;
919 memcpy(agi, &old_agi, sizeof(old_agi));