adaeabdefdd33ad7ab35e55a64acd69a5a62e6c4
[linux-2.6-microblaze.git] / fs / xfs / scrub / agheader.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2017 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_defer.h"
13 #include "xfs_btree.h"
14 #include "xfs_bit.h"
15 #include "xfs_log_format.h"
16 #include "xfs_trans.h"
17 #include "xfs_sb.h"
18 #include "xfs_inode.h"
19 #include "xfs_alloc.h"
20 #include "xfs_ialloc.h"
21 #include "xfs_rmap.h"
22 #include "scrub/xfs_scrub.h"
23 #include "scrub/scrub.h"
24 #include "scrub/common.h"
25 #include "scrub/trace.h"
26
27 /* Superblock */
28
29 /* Cross-reference with the other btrees. */
30 STATIC void
31 xchk_superblock_xref(
32         struct xfs_scrub        *sc,
33         struct xfs_buf          *bp)
34 {
35         struct xfs_mount        *mp = sc->mp;
36         xfs_agnumber_t          agno = sc->sm->sm_agno;
37         xfs_agblock_t           agbno;
38         int                     error;
39
40         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
41                 return;
42
43         agbno = XFS_SB_BLOCK(mp);
44
45         error = xchk_ag_init(sc, agno, &sc->sa);
46         if (!xchk_xref_process_error(sc, agno, agbno, &error))
47                 return;
48
49         xchk_xref_is_used_space(sc, agbno, 1);
50         xchk_xref_is_not_inode_chunk(sc, agbno, 1);
51         xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
52         xchk_xref_is_not_shared(sc, agbno, 1);
53
54         /* scrub teardown will take care of sc->sa for us */
55 }
56
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 *
 * Returns 0 or a negative errno; geometry mismatches are reported by
 * setting the scrub corruption/preen flags, not via the return value.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	/* Single-byte log2 fields need no endian conversion. */
	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		/* sb_bad_features2 mirrors sb_features2; see xfs_sb.c. */
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}
347
348 /* AGF */
349
350 /* Tally freespace record lengths. */
351 STATIC int
352 xchk_agf_record_bno_lengths(
353         struct xfs_btree_cur            *cur,
354         struct xfs_alloc_rec_incore     *rec,
355         void                            *priv)
356 {
357         xfs_extlen_t                    *blocks = priv;
358
359         (*blocks) += rec->ar_blockcount;
360         return 0;
361 }
362
363 /* Check agf_freeblks */
364 static inline void
365 xchk_agf_xref_freeblks(
366         struct xfs_scrub        *sc)
367 {
368         struct xfs_agf          *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
369         xfs_extlen_t            blocks = 0;
370         int                     error;
371
372         if (!sc->sa.bno_cur)
373                 return;
374
375         error = xfs_alloc_query_all(sc->sa.bno_cur,
376                         xchk_agf_record_bno_lengths, &blocks);
377         if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
378                 return;
379         if (blocks != be32_to_cpu(agf->agf_freeblks))
380                 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
381 }
382
383 /* Cross reference the AGF with the cntbt (freespace by length btree) */
384 static inline void
385 xchk_agf_xref_cntbt(
386         struct xfs_scrub        *sc)
387 {
388         struct xfs_agf          *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
389         xfs_agblock_t           agbno;
390         xfs_extlen_t            blocks;
391         int                     have;
392         int                     error;
393
394         if (!sc->sa.cnt_cur)
395                 return;
396
397         /* Any freespace at all? */
398         error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
399         if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
400                 return;
401         if (!have) {
402                 if (agf->agf_freeblks != cpu_to_be32(0))
403                         xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
404                 return;
405         }
406
407         /* Check agf_longest */
408         error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
409         if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
410                 return;
411         if (!have || blocks != be32_to_cpu(agf->agf_longest))
412                 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
413 }
414
415 /* Check the btree block counts in the AGF against the btrees. */
416 STATIC void
417 xchk_agf_xref_btreeblks(
418         struct xfs_scrub        *sc)
419 {
420         struct xfs_agf          *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
421         struct xfs_mount        *mp = sc->mp;
422         xfs_agblock_t           blocks;
423         xfs_agblock_t           btreeblks;
424         int                     error;
425
426         /* Check agf_rmap_blocks; set up for agf_btreeblks check */
427         if (sc->sa.rmap_cur) {
428                 error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
429                 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
430                         return;
431                 btreeblks = blocks - 1;
432                 if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
433                         xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
434         } else {
435                 btreeblks = 0;
436         }
437
438         /*
439          * No rmap cursor; we can't xref if we have the rmapbt feature.
440          * We also can't do it if we're missing the free space btree cursors.
441          */
442         if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
443             !sc->sa.bno_cur || !sc->sa.cnt_cur)
444                 return;
445
446         /* Check agf_btreeblks */
447         error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
448         if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
449                 return;
450         btreeblks += blocks - 1;
451
452         error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
453         if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
454                 return;
455         btreeblks += blocks - 1;
456
457         if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
458                 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
459 }
460
461 /* Check agf_refcount_blocks against tree size */
462 static inline void
463 xchk_agf_xref_refcblks(
464         struct xfs_scrub        *sc)
465 {
466         struct xfs_agf          *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
467         xfs_agblock_t           blocks;
468         int                     error;
469
470         if (!sc->sa.refc_cur)
471                 return;
472
473         error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
474         if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
475                 return;
476         if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
477                 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
478 }
479
480 /* Cross-reference with the other btrees. */
481 STATIC void
482 xchk_agf_xref(
483         struct xfs_scrub        *sc)
484 {
485         struct xfs_mount        *mp = sc->mp;
486         xfs_agblock_t           agbno;
487         int                     error;
488
489         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
490                 return;
491
492         agbno = XFS_AGF_BLOCK(mp);
493
494         error = xchk_ag_btcur_init(sc, &sc->sa);
495         if (error)
496                 return;
497
498         xchk_xref_is_used_space(sc, agbno, 1);
499         xchk_agf_xref_freeblks(sc);
500         xchk_agf_xref_cntbt(sc);
501         xchk_xref_is_not_inode_chunk(sc, agbno, 1);
502         xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
503         xchk_agf_xref_btreeblks(sc);
504         xchk_xref_is_not_shared(sc, agbno, 1);
505         xchk_agf_xref_refcblks(sc);
506
507         /* scrub teardown will take care of sc->sa for us */
508 }
509
/*
 * Scrub the AGF.  Checks the AG length, btree roots/levels, AGFL
 * counters, and the incore per-AG counters, then cross-references the
 * AGF block itself against the AG btrees.  Returns 0 or a negative
 * errno; corruption is reported via the scrub output flags.
 */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/*
	 * Check the AGFL counters.  The AGFL is a circular buffer, so
	 * the active region may wrap past the end of the list.
	 */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	/* An empty freelist (flcount == 0) makes first/last meaningless. */
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	xfs_perag_put(pag);

	xchk_agf_xref(sc);
out:
	return error;
}
604
605 /* AGFL */
606
/* Scratch state for walking and uniqueness-checking the AGFL entries. */
struct xchk_agfl_info {
	unsigned int		sz_entries;	/* capacity of entries[] */
	unsigned int		nr_entries;	/* entries collected so far */
	xfs_agblock_t		*entries;	/* AGFL block numbers seen */
	struct xfs_scrub	*sc;		/* scrub context */
};
613
/* Cross-reference one AGFL block with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	/* Skip cross-referencing if corruption was already found. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* AGFL blocks are owned by the AG (XFS_RMAP_OINFO_AG), not the FS. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}
628
629 /* Scrub an AGFL block. */
630 STATIC int
631 xchk_agfl_block(
632         struct xfs_mount        *mp,
633         xfs_agblock_t           agbno,
634         void                    *priv)
635 {
636         struct xchk_agfl_info   *sai = priv;
637         struct xfs_scrub        *sc = sai->sc;
638         xfs_agnumber_t          agno = sc->sa.agno;
639
640         if (xfs_verify_agbno(mp, agno, agbno) &&
641             sai->nr_entries < sai->sz_entries)
642                 sai->entries[sai->nr_entries++] = agbno;
643         else
644                 xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
645
646         xchk_agfl_block_xref(sc, agbno);
647
648         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
649                 return XFS_BTREE_QUERY_RANGE_ABORT;
650
651         return 0;
652 }
653
654 static int
655 xchk_agblock_cmp(
656         const void              *pa,
657         const void              *pb)
658 {
659         const xfs_agblock_t     *a = pa;
660         const xfs_agblock_t     *b = pb;
661
662         return (int)*a - (int)*b;
663 }
664
665 /* Cross-reference with the other btrees. */
666 STATIC void
667 xchk_agfl_xref(
668         struct xfs_scrub        *sc)
669 {
670         struct xfs_mount        *mp = sc->mp;
671         xfs_agblock_t           agbno;
672         int                     error;
673
674         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
675                 return;
676
677         agbno = XFS_AGFL_BLOCK(mp);
678
679         error = xchk_ag_btcur_init(sc, &sc->sa);
680         if (error)
681                 return;
682
683         xchk_xref_is_used_space(sc, agbno, 1);
684         xchk_xref_is_not_inode_chunk(sc, agbno, 1);
685         xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
686         xchk_xref_is_not_shared(sc, agbno, 1);
687
688         /*
689          * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
690          * active so that the agfl block xref can use it too.
691          */
692 }
693
/*
 * Scrub the AGFL.  Walks every block on the free list, verifying each
 * block number and checking the list for duplicates.  Returns 0 or a
 * negative errno; corruption is reported via the scrub output flags.
 */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	/* We need the AGF to know how many AGFL entries are in use. */
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	/* agflcount is bounded by xfs_agfl_size(), so this can't overflow. */
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		/* Walk aborted because corruption was already flagged. */
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	/* The walk must have visited exactly agf_flcount blocks. */
	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
767
768 /* AGI */
769
770 /* Check agi_count/agi_freecount */
771 static inline void
772 xchk_agi_xref_icounts(
773         struct xfs_scrub        *sc)
774 {
775         struct xfs_agi          *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
776         xfs_agino_t             icount;
777         xfs_agino_t             freecount;
778         int                     error;
779
780         if (!sc->sa.ino_cur)
781                 return;
782
783         error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
784         if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
785                 return;
786         if (be32_to_cpu(agi->agi_count) != icount ||
787             be32_to_cpu(agi->agi_freecount) != freecount)
788                 xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
789 }
790
791 /* Cross-reference with the other btrees. */
792 STATIC void
793 xchk_agi_xref(
794         struct xfs_scrub        *sc)
795 {
796         struct xfs_mount        *mp = sc->mp;
797         xfs_agblock_t           agbno;
798         int                     error;
799
800         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
801                 return;
802
803         agbno = XFS_AGI_BLOCK(mp);
804
805         error = xchk_ag_btcur_init(sc, &sc->sa);
806         if (error)
807                 return;
808
809         xchk_xref_is_used_space(sc, agbno, 1);
810         xchk_xref_is_not_inode_chunk(sc, agbno, 1);
811         xchk_agi_xref_icounts(sc);
812         xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
813         xchk_xref_is_not_shared(sc, agbno, 1);
814
815         /* scrub teardown will take care of sc->sa for us */
816 }
817
/*
 * Scrub the AGI.  Checks the AG length, inode btree roots/levels,
 * inode counters and pointers, the unlinked buckets, and the incore
 * per-AG counters, then cross-references the AGI block itself.
 * Returns 0 or a negative errno; corruption is reported via the scrub
 * output flags.
 */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	/* icount can't exceed the AG's capacity or be below freecount. */
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers; NULLAGINO is a legal value for these. */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Padding must be zeroed on disk. */
	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	xfs_perag_put(pag);

	xchk_agi_xref(sc);
out:
	return error;
}