1 // SPDX-License-Identifier: GPL-2.0+
/*
3 * Copyright (C) 2017 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_log_format.h"
13 #include "xfs_inode.h"
14 #include "xfs_da_format.h"
15 #include "xfs_da_btree.h"
17 #include "xfs_attr_leaf.h"
18 #include "scrub/scrub.h"
19 #include "scrub/common.h"
20 #include "scrub/dabtree.h"
23 /* Set us up to scrub an inode's extended attributes. */
/*
 * NOTE(review): this excerpt is fragmented -- the xchk_setup_xattr()
 * signature, the tail of the sizing comment below, and (presumably) an
 * allocation-failure check after kmem_zalloc_large() are not visible
 * here; confirm against the complete file.
 */
32 * Allocate the buffer without the inode lock held. We need enough
33 * space to read every xattr value in the file or enough space to
34 * hold three copies of the xattr free space bitmap. (Not both at
/*
 * Scratch buffer size: the larger of one maximum-size xattr value and
 * three bitmaps with one bit per byte of an attr block.  The freemap
 * checker later in this file carves sc->buf into exactly three such
 * maps (used, free, scratch).
 */
37 sz = max_t(size_t, XATTR_SIZE_MAX, 3 * sizeof(long) *
38 BITS_TO_LONGS(sc->mp->m_attr_geo->blksize));
/* Zeroed large allocation; KM_SLEEP presumably may block -- TODO confirm. */
39 sc->buf = kmem_zalloc_large(sz, KM_SLEEP);
/* Hand off to the common inode-scrub setup helper (opaque from here). */
43 return xchk_setup_inode_contents(sc, ip, 0);
46 /* Extended Attributes */
/*
 * Per-scrub xattr walk context.  NOTE(review): the enclosing
 * "struct xchk_xattr {" line and its other members are missing from
 * this excerpt; the listent callback below reaches sx->sc, so a
 * struct xfs_scrub pointer presumably also lives in this struct.
 * The embedded list context lets container_of() recover this struct
 * from the iterator callback argument.
 */
49 struct xfs_attr_list_context context;
54 * Check that an extended attribute key can be looked up by hash.
56 * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
57 * to call this function for every attribute key in an inode. Once
58 * we're here, we load the attribute value to see if any errors happen,
59 * or if we get more or less data than we expected.
/*
 * NOTE(review): fragmented excerpt -- the xchk_xattr_listent()
 * signature (which supplies flags, name, namelen, valuelen), the line
 * assigning args.name, and several returns/closing braces are missing.
 */
63 struct xfs_attr_list_context *context,
69 struct xchk_xattr *sx;
70 struct xfs_da_args args = { NULL };
/* Recover our scrub context from the embedded iterator context. */
73 sx = container_of(context, struct xchk_xattr, context);
/* If the scrub was interrupted, tell the iterator to stop walking. */
75 if (xchk_should_terminate(sx->sc, &error)) {
76 context->seen_enough = 1;
80 if (flags & XFS_ATTR_INCOMPLETE) {
81 /* Incomplete attr key, just mark the inode for preening. */
82 xchk_ino_set_preen(sx->sc, context->dp->i_ino);
86 /* Does this name make sense? */
87 if (!xfs_attr_namecheck(name, namelen)) {
88 xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
/*
 * Build a read-only lookup request for this key.  ATTR_KERNOTIME
 * presumably suppresses timestamp updates for the probe -- confirm.
 */
92 args.flags = ATTR_KERNOTIME;
/* Mirror the entry's namespace (root/secure) flags into the lookup. */
93 if (flags & XFS_ATTR_ROOT)
94 args.flags |= ATTR_ROOT;
95 else if (flags & XFS_ATTR_SECURE)
96 args.flags |= ATTR_SECURE;
97 args.geo = context->dp->i_mount->m_attr_geo;
98 args.whichfork = XFS_ATTR_FORK;
99 args.dp = context->dp;
/* NOTE(review): args.name assignment (original line ~100) is missing here. */
101 args.namelen = namelen;
102 args.hashval = xfs_da_hashname(args.name, args.namelen);
103 args.trans = context->tp;
/* Value lands in the scratch buffer sized >= XATTR_SIZE_MAX at setup. */
104 args.value = sx->sc->buf;
105 args.valuelen = XATTR_SIZE_MAX;
/* Re-find this attr by hash with the inode still locked. */
107 error = xfs_attr_get_ilocked(context->dp, &args);
/* -EEXIST here means the key was found; presumably cleared on a missing line. */
108 if (error == -EEXIST)
110 if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
/* The value read back must match the size the iterator reported. */
113 if (args.valuelen != valuelen)
114 xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
/* Once corruption is recorded, stop iterating the remaining keys. */
117 if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
118 context->seen_enough = 1;
123 * Mark a range [start, start+len) in this map. Returns true if the
124 * region was free, and false if there's a conflict or a problem.
126 * Within a char, the lowest bit of the char represents the byte with
127 * the smallest address
/*
 * NOTE(review): the xchk_xattr_set_map() signature line (with the map,
 * start, and len parameters) and the early-return bodies are missing
 * from this excerpt.
 */
131 struct xfs_scrub *sc,
/* One bit per byte of an attr block. */
136 unsigned int mapsize = sc->mp->m_attr_geo->blksize;
/* A range starting at or past the end of the block is a problem. */
139 if (start >= mapsize)
/* Clamp overlong ranges to the end of the block. */
141 if (start + len > mapsize) {
142 len = mapsize - start;
/* Any bit already set inside [start, start+len) means an overlap. */
146 if (find_next_bit(map, mapsize, start) < start + len)
/* Region was free: claim it. */
148 bitmap_set(map, start, len);
154 * Check the leaf freemap from the usage bitmap. Returns false if the
155 * attr freemap has problems or points to used space.
/*
 * NOTE(review): fragmented -- the return-type line, the "map"
 * (in-use bitmap) parameter, the declaration of loop counter "i",
 * and some braces are not visible in this excerpt.
 */
158 xchk_xattr_check_freemap(
159 struct xfs_scrub *sc,
161 struct xfs_attr3_icleaf_hdr *leafhdr)
163 unsigned long *freemap;
164 unsigned long *dstmap;
165 unsigned int mapsize = sc->mp->m_attr_geo->blksize;
168 /* Construct bitmap of freemap contents. */
/* sc->buf holds three bitmaps back to back; the freemap is the second. */
169 freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
170 bitmap_zero(freemap, mapsize);
171 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
/* Freemap entries that overlap each other are themselves corrupt. */
172 if (!xchk_xattr_set_map(sc, freemap,
173 leafhdr->freemap[i].base,
174 leafhdr->freemap[i].size))
178 /* Look for bits that are set in freemap and are marked in use. */
/* The third bitmap in sc->buf is scratch space for the intersection. */
179 dstmap = freemap + BITS_TO_LONGS(mapsize);
/* Healthy iff the freemap and the in-use map do not intersect. */
180 return bitmap_and(dstmap, freemap, map, mapsize) == 0;
184 * Check this leaf entry's relations to everything else.
185 * Returns the number of bytes used for the name/value data.
/*
 * NOTE(review): fragmented -- the xchk_xattr_entry() signature line,
 * the level/buf_end/idx/last_hashval parameters, local "name_end",
 * and some returns/braces are missing from this excerpt.
 */
189 struct xchk_da_btree *ds,
192 struct xfs_attr_leafblock *leaf,
193 struct xfs_attr3_icleaf_hdr *leafhdr,
194 unsigned long *usedmap,
195 struct xfs_attr_leaf_entry *ent,
197 unsigned int *usedbytes,
200 struct xfs_mount *mp = ds->state->mp;
202 struct xfs_attr_leaf_name_local *lentry;
203 struct xfs_attr_leaf_name_remote *rentry;
204 unsigned int nameidx;
205 unsigned int namesize;
/* NOTE(review): the condition guarding this corruption flag (likely a
 * padding check on the entry) is on a line missing from this excerpt. */
208 xchk_da_set_corrupt(ds, level);
210 /* Hash values in order? */
/* Entries must be sorted by ascending hash for binary lookup to work. */
211 if (be32_to_cpu(ent->hashval) < *last_hashval)
212 xchk_da_set_corrupt(ds, level);
213 *last_hashval = be32_to_cpu(ent->hashval);
/* Name/value data must live between firstused and the end of the block. */
215 nameidx = be16_to_cpu(ent->nameidx);
216 if (nameidx < leafhdr->firstused ||
217 nameidx >= mp->m_attr_geo->blksize) {
218 xchk_da_set_corrupt(ds, level);
222 /* Check the name information. */
/* Local entries embed the value; remote entries point at value blocks. */
223 if (ent->flags & XFS_ATTR_LOCAL) {
224 lentry = xfs_attr3_leaf_name_local(leaf, idx);
225 namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
226 be16_to_cpu(lentry->valuelen));
227 name_end = (char *)lentry + namesize;
/* Zero-length names are never valid. */
228 if (lentry->namelen == 0)
229 xchk_da_set_corrupt(ds, level);
231 rentry = xfs_attr3_leaf_name_remote(leaf, idx);
232 namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
233 name_end = (char *)rentry + namesize;
/* Remote entries need both a name and a value block pointer. */
234 if (rentry->namelen == 0 || rentry->valueblk == 0)
235 xchk_da_set_corrupt(ds, level);
/* Name/value region must not run off the end of the buffer. */
237 if (name_end > buf_end)
238 xchk_da_set_corrupt(ds, level);
/* Claim the name/value bytes; overlap with anything else is corruption. */
240 if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
241 xchk_da_set_corrupt(ds, level);
/* Only count usage when the entry looked sane, so the header's
 * usedbytes cross-check in the caller stays meaningful. */
242 if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
243 *usedbytes += namesize;
246 /* Scrub an attribute leaf. */
/*
 * NOTE(review): fragmented -- the xchk_xattr_block() signature line
 * (with the "level" parameter), locals buf_end/off/i, and various
 * returns/braces/else lines are missing from this excerpt.
 */
249 struct xchk_da_btree *ds,
252 struct xfs_attr3_icleaf_hdr leafhdr;
253 struct xfs_mount *mp = ds->state->mp;
254 struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
255 struct xfs_buf *bp = blk->bp;
/* ds->private tracks the last leaf block already checked. */
256 xfs_dablk_t *last_checked = ds->private;
257 struct xfs_attr_leafblock *leaf = bp->b_addr;
258 struct xfs_attr_leaf_entry *ent;
259 struct xfs_attr_leaf_entry *entries;
/* First of the three bitmaps in sc->buf: bytes in use in this block. */
260 unsigned long *usedmap = ds->sc->buf;
263 __u32 last_hashval = 0;
264 unsigned int usedbytes = 0;
265 unsigned int hdrsize;
/* Don't re-scrub a leaf block we already visited on this walk. */
268 if (*last_checked == blk->blkno)
270 *last_checked = blk->blkno;
271 bitmap_zero(usedmap, mp->m_attr_geo->blksize);
273 /* Check all the padding. */
/* CRC (v5) filesystems use the larger attr3 header with extra padding. */
274 if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
275 struct xfs_attr3_leafblock *leaf = bp->b_addr;
277 if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
278 leaf->hdr.info.hdr.pad != 0)
279 xchk_da_set_corrupt(ds, level);
/* NOTE(review): this is presumably the else (non-CRC) branch; the
 * "} else {" line itself is missing from this excerpt. */
281 if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
282 xchk_da_set_corrupt(ds, level);
285 /* Check the leaf header */
286 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
287 hdrsize = xfs_attr3_leaf_hdr_size(leaf);
/* Header byte counts and firstused must stay inside the block. */
289 if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
290 xchk_da_set_corrupt(ds, level);
291 if (leafhdr.firstused > mp->m_attr_geo->blksize)
292 xchk_da_set_corrupt(ds, level);
293 if (leafhdr.firstused < hdrsize)
294 xchk_da_set_corrupt(ds, level);
/* Claim the header bytes in the usage map before walking entries. */
295 if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
296 xchk_da_set_corrupt(ds, level);
298 if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
/* The entry array must end before the name/value region begins. */
301 entries = xfs_attr3_leaf_entryp(leaf);
302 if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
303 xchk_da_set_corrupt(ds, level);
305 buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
306 for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
307 /* Mark the leaf entry itself. */
308 off = (char *)ent - (char *)leaf;
309 if (!xchk_xattr_set_map(ds->sc, usedmap, off,
310 sizeof(xfs_attr_leaf_entry_t))) {
311 xchk_da_set_corrupt(ds, level);
315 /* Check the entry and nameval. */
316 xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
317 usedmap, ent, i, &usedbytes, &last_hashval);
/* Stop early once any entry flags corruption. */
319 if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
/* The free space map must not overlap anything marked in use. */
323 if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
324 xchk_da_set_corrupt(ds, level);
/* Cross-check the header's usedbytes against what we tallied. */
326 if (leafhdr.usedbytes != usedbytes)
327 xchk_da_set_corrupt(ds, level);
333 /* Scrub a attribute btree record. */
/*
 * NOTE(review): fragmented -- the xchk_xattr_rec() signature (with
 * level and rec parameters), locals bp/hdrsize/nameidx/hash/error,
 * and several returns/braces/else lines are missing from this excerpt.
 */
336 struct xchk_da_btree *ds,
340 struct xfs_mount *mp = ds->state->mp;
/* The da-btree walker hands us the raw record; it is a leaf entry. */
341 struct xfs_attr_leaf_entry *ent = rec;
342 struct xfs_da_state_blk *blk;
343 struct xfs_attr_leaf_name_local *lentry;
344 struct xfs_attr_leaf_name_remote *rentry;
346 xfs_dahash_t calc_hash;
350 unsigned int badflags;
353 blk = &ds->state->path.blk[level];
355 /* Check the whole block, if necessary. */
356 error = xchk_xattr_block(ds, level);
359 if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
362 /* Check the hash of the entry. */
/* Verify this entry's hash is consistent with the btree path above it. */
363 error = xchk_da_btree_hash(ds, level, &ent->hashval);
367 /* Find the attr entry's location. */
369 hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
/* The name offset must land between the header and the block's end. */
370 nameidx = be16_to_cpu(ent->nameidx);
371 if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
372 xchk_da_set_corrupt(ds, level);
376 /* Retrieve the entry and check it. */
377 hash = be32_to_cpu(ent->hashval);
/* Any flag bit outside the known set marks the entry corrupt. */
378 badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
379 XFS_ATTR_INCOMPLETE);
380 if ((ent->flags & badflags) != 0)
381 xchk_da_set_corrupt(ds, level);
/* Recompute the name hash from the local or remote name bytes. */
382 if (ent->flags & XFS_ATTR_LOCAL) {
383 lentry = (struct xfs_attr_leaf_name_local *)
384 (((char *)bp->b_addr) + nameidx);
385 if (lentry->namelen <= 0) {
386 xchk_da_set_corrupt(ds, level);
389 calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
/* NOTE(review): presumably the remote-entry else branch; the
 * "} else {" line is missing from this excerpt. */
391 rentry = (struct xfs_attr_leaf_name_remote *)
392 (((char *)bp->b_addr) + nameidx);
393 if (rentry->namelen <= 0) {
394 xchk_da_set_corrupt(ds, level);
397 calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
/* The stored hash must match the hash of the stored name. */
399 if (calc_hash != hash)
400 xchk_da_set_corrupt(ds, level);
406 /* Scrub the extended attribute metadata. */
/*
 * NOTE(review): fragmented excerpt -- the xchk_xattr() signature line,
 * the local "error", several returns/braces, and everything after the
 * final error check (the function continues past this excerpt) are
 * not visible here.
 */
409 struct xfs_scrub *sc)
411 struct xchk_xattr sx;
412 struct attrlist_cursor_kern cursor = { 0 };
/* -1U: sentinel meaning "no leaf block checked yet" for the rec walker. */
413 xfs_dablk_t last_checked = -1U;
/* Nothing to do for inodes with no attr fork contents. */
416 if (!xfs_inode_hasattr(sc->ip))
419 memset(&sx, 0, sizeof(sx));
420 /* Check attribute tree structure */
/* Walk the attr fork's da btree, scrubbing each record/leaf. */
421 error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
/* Skip the per-key lookups if the structure walk found corruption. */
426 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
429 /* Check that every attr key can also be looked up by hash. */
430 sx.context.dp = sc->ip;
431 sx.context.cursor = &cursor;
432 sx.context.resynch = 1;
/* Our callback verifies each key's value during the listing. */
433 sx.context.put_listent = xchk_xattr_listent;
434 sx.context.tp = sc->tp;
/* ATTR_INCOMPLETE: also surface half-created keys to the callback. */
435 sx.context.flags = ATTR_INCOMPLETE;
439 * Look up every xattr in this file by name.
441 * Use the backend implementation of xfs_attr_list to call
442 * xchk_xattr_listent on every attribute key in this inode.
443 * In other words, we use the same iterator/callback mechanism
444 * that listattr uses to scrub extended attributes, though in our
445 * _listent function, we check the value of the attribute.
447 * The VFS only locks i_rwsem when modifying attrs, so keep all
448 * three locks held because that's the only way to ensure we're
449 * the only thread poking into the da btree. We traverse the da
450 * btree while holding a leaf buffer locked for the xattr name
451 * iteration, which doesn't really follow the usual buffer
454 error = xfs_attr_list_int_ilocked(&sx.context);
455 if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))