// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped. Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */
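
/*
 * Illustrative example (values invented for this comment): if two reflinked
 * files, inodes 129 and 130, both map AG block 100 at file offset 0, the
 * tree holds two records with keys [100, 129, 0] and [100, 130, 0].
 * Ordering by owner and offset after the AG block is what keeps such
 * duplicate-block keys unique.
 */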

static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.agno, cur->bc_ag.pag);
}

STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = agbp->b_pag;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	xfs_agblock_t		bno;
	int			error;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno, bno, 1);
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, agbp->b_pag, bno, 1, false);

	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno, bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	pag = cur->bc_ag.agbp->b_pag;
	xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
	return 0;
}

STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}

STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record. In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	uint64_t		off;
	int			adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = rec->rmap.rm_offset;
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}
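
/*
 * Worked example for the high key above (illustrative values): a data fork
 * record with rm_startblock 10, rm_blockcount 4 and rm_offset 100 maps AG
 * blocks 10-13 to file offsets 100-103, so its high key is
 * [13, rm_owner, 103]. For bmbt blocks and non-inode owners the offset
 * field carries no file offset, so only the startblock is adjusted.
 */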

STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_rmap_irec	*rec = &cur->bc_rec.r;
	struct xfs_rmap_key	*kp = &key->rmap;
	__u64			x, y;
	int64_t			d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
	y = rec->rm_offset;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}
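
/*
 * Like the other comparison helpers here, xfs_rmapbt_key_diff() follows a
 * memcmp()-style contract: the sign of the return value orders the key
 * against the cursor's record, comparing startblock, then owner, then
 * offset.
 */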

STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	struct xfs_rmap_key	*kp1 = &k1->rmap;
	struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t			d;
	__u64			x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		     be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};

STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
};

static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ag.agno = agno;
	cur->bc_ops = &xfs_rmapbt_ops;

	/* take a reference for the cursor; staging cursors pass a NULL pag */
	if (pag)
		atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	return cur;
}

/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, tp, agno, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_ag.agbp = agbp;
	return cur;
}
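
/*
 * Typical (illustrative) call sequence, assuming the caller already holds a
 * locked AGF buffer and a perag reference:
 *
 *	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno, pag);
 *	error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
 *			&stat);
 *	...
 *	xfs_btree_del_cursor(cur, error);
 */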

/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, NULL, agno, NULL);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new reverse mapping btree root. Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}
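
/*
 * Illustrative arithmetic, assuming 4096-byte blocks, a 56-byte short-form
 * CRC block header, 24-byte records, 20-byte keys and 4-byte pointers: a
 * leaf holds (4096 - 56) / 24 = 168 records, while a node holds
 * (4096 - 56) / (2 * 20 + 4) = 91 key/pointer sets (two keys per pointer
 * because this btree is overlapping).
 */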

/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	/*
	 * On a non-reflink filesystem, the maximum number of rmap
	 * records is the number of blocks in the AG, hence the max
	 * rmapbt height is log_$maxrecs($agblocks). However, with
	 * reflink each AG block can have up to 2^32 (per the refcount
	 * record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.
	 *
	 * That effectively means that the max rmapbt height must be
	 * XFS_BTREE_MAXLEVELS. "Fortunately" we'll run out of AG
	 * blocks to feed the rmapbt long before the rmapbt reaches
	 * maximum height. The reflink code uses ag_resv_critical to
	 * disallow reflinking when less than 10% of the per-AG metadata
	 * block reservation remains, since the fallback is a regular
	 * file copy.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
	else
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
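
/*
 * Example with the illustrative geometry above and minimum fanouts of half
 * the maximum (m_rmap_mnr of 84 leaf / 45 node records): a non-reflink AG
 * of 2^20 blocks needs at most ceil(2^20 / 84) = 12484 leaf blocks, and
 * the node levels then shrink 12484 -> 278 -> 7 -> 1, for a computed
 * maximum height of 4.
 */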

/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion. We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == pag->pag_agno)
		agblocks -= mp->m_sb.sb_logblocks;

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}
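
/*
 * Illustrative numbers for the reservation above: for an AG of 1048576
 * blocks with no internal log, *ask grows by max(1048576 / 100,
 * xfs_rmapbt_max_size(mp, 1048576)) = max(10485, ...) blocks, and *used
 * grows by the on-disk agf_rmap_blocks count.
 */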