1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_btree.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_extent_busy.h"
20 #include "xfs_errortag.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_trans.h"
24 #include "xfs_buf_item.h"
27 #include "xfs_ag_resv.h"
30 struct kmem_cache *xfs_extfree_item_cache;
32 struct workqueue_struct *xfs_alloc_wq;
34 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
36 #define XFSA_FIXUP_BNO_OK 1
37 #define XFSA_FIXUP_CNT_OK 2
40 * Size of the AGFL. For CRC-enabled filesystes we steal a couple of slots in
41 * the beginning of the block for a proper header with the location information
48 unsigned int size = mp->m_sb.sb_sectsize;
51 size -= sizeof(struct xfs_agfl);
53 return size / sizeof(xfs_agblock_t);
60 if (xfs_has_rmapbt(mp))
61 return XFS_RMAP_BLOCK(mp) + 1;
62 if (xfs_has_finobt(mp))
63 return XFS_FIBT_BLOCK(mp) + 1;
64 return XFS_IBT_BLOCK(mp) + 1;
71 if (xfs_has_reflink(mp))
72 return xfs_refc_block(mp) + 1;
73 if (xfs_has_rmapbt(mp))
74 return XFS_RMAP_BLOCK(mp) + 1;
75 if (xfs_has_finobt(mp))
76 return XFS_FIBT_BLOCK(mp) + 1;
77 return XFS_IBT_BLOCK(mp) + 1;
81 * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
82 * guarantee that we can refill the AGFL prior to allocating space in a nearly
83 * full AG. Although the space described by the free space btrees, the
84 * blocks used by the freesp btrees themselves, and the blocks owned by the
85 * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
86 * free space in the AG drop so low that the free space btrees cannot refill an
87 * empty AGFL up to the minimum level. Rather than grind through empty AGs
88 * until the fs goes down, we subtract this many AG blocks from the incore
89 * fdblocks to ensure user allocation does not overcommit the space the
90 * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
91 * withhold space from xfs_mod_fdblocks, so we do not account for that here.
93 #define XFS_ALLOCBT_AGFL_RESERVE 4
96 * Compute the number of blocks that we set aside to guarantee the ability to
97 * refill the AGFL and handle a full bmap btree split.
99 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
100 * AGF buffer (PV 947395), we place constraints on the relationship among
101 * actual allocations for data blocks, freelist blocks, and potential file data
102 * bmap btree blocks. However, these restrictions may result in no actual space
103 * allocated for a delayed extent, for example, a data block in a certain AG is
104 * allocated but there is no additional block for the additional bmap btree
105 * block due to a split of the bmap btree of the file. The result of this may
106 * lead to an infinite loop when the file gets flushed to disk and all delayed
107 * extents need to be actually allocated. To get around this, we explicitly set
108 * aside a few blocks which will not be reserved in delayed allocation.
110 * For each AG, we need to reserve enough blocks to replenish a totally empty
111 * AGFL and 4 more to handle a potential split of the file's bmap btree.
115 struct xfs_mount *mp)
117 return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
121 * When deciding how much space to allocate out of an AG, we limit the
122 * allocation maximum size to the size the AG. However, we cannot use all the
123 * blocks in the AG - some are permanently used by metadata. These
124 * blocks are generally:
125 * - the AG superblock, AGF, AGI and AGFL
126 * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
127 * the AGI free inode and rmap btree root blocks.
128 * - blocks on the AGFL according to xfs_alloc_set_aside() limits
129 * - the rmapbt root block
131 * The AG headers are sector sized, so the amount of space they take up is
132 * dependent on filesystem geometry. The others are all single blocks.
135 xfs_alloc_ag_max_usable(
136 struct xfs_mount *mp)
140 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
141 blocks += XFS_ALLOCBT_AGFL_RESERVE;
142 blocks += 3; /* AGF, AGI btree root blocks */
143 if (xfs_has_finobt(mp))
144 blocks++; /* finobt root block */
145 if (xfs_has_rmapbt(mp))
146 blocks++; /* rmap root block */
147 if (xfs_has_reflink(mp))
148 blocks++; /* refcount root block */
150 return mp->m_sb.sb_agblocks - blocks;
154 * Lookup the record equal to [bno, len] in the btree given by cur.
156 STATIC int /* error */
158 struct xfs_btree_cur *cur, /* btree cursor */
159 xfs_agblock_t bno, /* starting block of extent */
160 xfs_extlen_t len, /* length of extent */
161 int *stat) /* success/failure */
165 cur->bc_rec.a.ar_startblock = bno;
166 cur->bc_rec.a.ar_blockcount = len;
167 error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
168 cur->bc_ag.abt.active = (*stat == 1);
173 * Lookup the first record greater than or equal to [bno, len]
174 * in the btree given by cur.
178 struct xfs_btree_cur *cur, /* btree cursor */
179 xfs_agblock_t bno, /* starting block of extent */
180 xfs_extlen_t len, /* length of extent */
181 int *stat) /* success/failure */
185 cur->bc_rec.a.ar_startblock = bno;
186 cur->bc_rec.a.ar_blockcount = len;
187 error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
188 cur->bc_ag.abt.active = (*stat == 1);
193 * Lookup the first record less than or equal to [bno, len]
194 * in the btree given by cur.
198 struct xfs_btree_cur *cur, /* btree cursor */
199 xfs_agblock_t bno, /* starting block of extent */
200 xfs_extlen_t len, /* length of extent */
201 int *stat) /* success/failure */
204 cur->bc_rec.a.ar_startblock = bno;
205 cur->bc_rec.a.ar_blockcount = len;
206 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
207 cur->bc_ag.abt.active = (*stat == 1);
212 xfs_alloc_cur_active(
213 struct xfs_btree_cur *cur)
215 return cur && cur->bc_ag.abt.active;
219 * Update the record referred to by cur to the value given
221 * This either works (return 0) or gets an EFSCORRUPTED error.
223 STATIC int /* error */
225 struct xfs_btree_cur *cur, /* btree cursor */
226 xfs_agblock_t bno, /* starting block of extent */
227 xfs_extlen_t len) /* length of extent */
229 union xfs_btree_rec rec;
231 rec.alloc.ar_startblock = cpu_to_be32(bno);
232 rec.alloc.ar_blockcount = cpu_to_be32(len);
233 return xfs_btree_update(cur, &rec);
236 /* Convert the ondisk btree record to its incore representation. */
238 xfs_alloc_btrec_to_irec(
239 const union xfs_btree_rec *rec,
240 struct xfs_alloc_rec_incore *irec)
242 irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
243 irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
246 /* Simple checks for free space records. */
248 xfs_alloc_check_irec(
249 struct xfs_btree_cur *cur,
250 const struct xfs_alloc_rec_incore *irec)
252 struct xfs_perag *pag = cur->bc_ag.pag;
254 if (irec->ar_blockcount == 0)
255 return __this_address;
257 /* check for valid extent range, including overflow */
258 if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
259 return __this_address;
265 xfs_alloc_complain_bad_rec(
266 struct xfs_btree_cur *cur,
268 const struct xfs_alloc_rec_incore *irec)
270 struct xfs_mount *mp = cur->bc_mp;
273 "%s Freespace BTree record corruption in AG %d detected at %pS!",
274 cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
275 cur->bc_ag.pag->pag_agno, fa);
277 "start block 0x%x block count 0x%x", irec->ar_startblock,
278 irec->ar_blockcount);
279 return -EFSCORRUPTED;
283 * Get the data from the pointed-to record.
287 struct xfs_btree_cur *cur, /* btree cursor */
288 xfs_agblock_t *bno, /* output: starting block of extent */
289 xfs_extlen_t *len, /* output: length of extent */
290 int *stat) /* output: success/failure */
292 struct xfs_alloc_rec_incore irec;
293 union xfs_btree_rec *rec;
297 error = xfs_btree_get_rec(cur, &rec, stat);
298 if (error || !(*stat))
301 xfs_alloc_btrec_to_irec(rec, &irec);
302 fa = xfs_alloc_check_irec(cur, &irec);
304 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
306 *bno = irec.ar_startblock;
307 *len = irec.ar_blockcount;
312 * Compute aligned version of the found extent.
313 * Takes alignment and min length into account.
316 xfs_alloc_compute_aligned(
317 xfs_alloc_arg_t *args, /* allocation argument structure */
318 xfs_agblock_t foundbno, /* starting block in found extent */
319 xfs_extlen_t foundlen, /* length in found extent */
320 xfs_agblock_t *resbno, /* result block number */
321 xfs_extlen_t *reslen, /* result length */
324 xfs_agblock_t bno = foundbno;
325 xfs_extlen_t len = foundlen;
329 /* Trim busy sections out of found extent */
330 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
333 * If we have a largish extent that happens to start before min_agbno,
334 * see if we can shift it into range...
336 if (bno < args->min_agbno && bno + len > args->min_agbno) {
337 diff = args->min_agbno - bno;
344 if (args->alignment > 1 && len >= args->minlen) {
345 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
347 diff = aligned_bno - bno;
349 *resbno = aligned_bno;
350 *reslen = diff >= len ? 0 : len - diff;
360 * Compute best start block and diff for "near" allocations.
361 * freelen >= wantlen already checked by caller.
363 STATIC xfs_extlen_t /* difference value (absolute) */
364 xfs_alloc_compute_diff(
365 xfs_agblock_t wantbno, /* target starting block */
366 xfs_extlen_t wantlen, /* target length */
367 xfs_extlen_t alignment, /* target alignment */
368 int datatype, /* are we allocating data? */
369 xfs_agblock_t freebno, /* freespace's starting block */
370 xfs_extlen_t freelen, /* freespace's length */
371 xfs_agblock_t *newbnop) /* result: best start block from free */
373 xfs_agblock_t freeend; /* end of freespace extent */
374 xfs_agblock_t newbno1; /* return block number */
375 xfs_agblock_t newbno2; /* other new block number */
376 xfs_extlen_t newlen1=0; /* length with newbno1 */
377 xfs_extlen_t newlen2=0; /* length with newbno2 */
378 xfs_agblock_t wantend; /* end of target extent */
379 bool userdata = datatype & XFS_ALLOC_USERDATA;
381 ASSERT(freelen >= wantlen);
382 freeend = freebno + freelen;
383 wantend = wantbno + wantlen;
385 * We want to allocate from the start of a free extent if it is past
386 * the desired block or if we are allocating user data and the free
387 * extent is before desired block. The second case is there to allow
388 * for contiguous allocation from the remaining free space if the file
389 * grows in the short term.
391 if (freebno >= wantbno || (userdata && freeend < wantend)) {
392 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
393 newbno1 = NULLAGBLOCK;
394 } else if (freeend >= wantend && alignment > 1) {
395 newbno1 = roundup(wantbno, alignment);
396 newbno2 = newbno1 - alignment;
397 if (newbno1 >= freeend)
398 newbno1 = NULLAGBLOCK;
400 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
401 if (newbno2 < freebno)
402 newbno2 = NULLAGBLOCK;
404 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
405 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
406 if (newlen1 < newlen2 ||
407 (newlen1 == newlen2 &&
408 XFS_ABSDIFF(newbno1, wantbno) >
409 XFS_ABSDIFF(newbno2, wantbno)))
411 } else if (newbno2 != NULLAGBLOCK)
413 } else if (freeend >= wantend) {
415 } else if (alignment > 1) {
416 newbno1 = roundup(freeend - wantlen, alignment);
417 if (newbno1 > freeend - wantlen &&
418 newbno1 - alignment >= freebno)
419 newbno1 -= alignment;
420 else if (newbno1 >= freeend)
421 newbno1 = NULLAGBLOCK;
423 newbno1 = freeend - wantlen;
425 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
429 * Fix up the length, based on mod and prod.
430 * len should be k * prod + mod for some k.
431 * If len is too small it is returned unchanged.
432 * If len hits maxlen it is left alone.
436 xfs_alloc_arg_t *args) /* allocation argument structure */
441 ASSERT(args->mod < args->prod);
443 ASSERT(rlen >= args->minlen);
444 ASSERT(rlen <= args->maxlen);
445 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
446 (args->mod == 0 && rlen < args->prod))
448 k = rlen % args->prod;
452 rlen = rlen - (k - args->mod);
454 rlen = rlen - args->prod + (args->mod - k);
455 /* casts to (int) catch length underflows */
456 if ((int)rlen < (int)args->minlen)
458 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
459 ASSERT(rlen % args->prod == args->mod);
460 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
461 rlen + args->minleft);
466 * Update the two btrees, logically removing from freespace the extent
467 * starting at rbno, rlen blocks. The extent is contained within the
468 * actual (current) free extent fbno for flen blocks.
469 * Flags are passed in indicating whether the cursors are set to the
472 STATIC int /* error code */
473 xfs_alloc_fixup_trees(
474 struct xfs_btree_cur *cnt_cur, /* cursor for by-size btree */
475 struct xfs_btree_cur *bno_cur, /* cursor for by-block btree */
476 xfs_agblock_t fbno, /* starting block of free extent */
477 xfs_extlen_t flen, /* length of free extent */
478 xfs_agblock_t rbno, /* starting block of returned extent */
479 xfs_extlen_t rlen, /* length of returned extent */
480 int flags) /* flags, XFSA_FIXUP_... */
482 int error; /* error code */
483 int i; /* operation results */
484 xfs_agblock_t nfbno1; /* first new free startblock */
485 xfs_agblock_t nfbno2; /* second new free startblock */
486 xfs_extlen_t nflen1=0; /* first new free length */
487 xfs_extlen_t nflen2=0; /* second new free length */
488 struct xfs_mount *mp;
493 * Look up the record in the by-size tree if necessary.
495 if (flags & XFSA_FIXUP_CNT_OK) {
497 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
499 if (XFS_IS_CORRUPT(mp,
503 return -EFSCORRUPTED;
506 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
508 if (XFS_IS_CORRUPT(mp, i != 1))
509 return -EFSCORRUPTED;
512 * Look up the record in the by-block tree if necessary.
514 if (flags & XFSA_FIXUP_BNO_OK) {
516 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
518 if (XFS_IS_CORRUPT(mp,
522 return -EFSCORRUPTED;
525 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
527 if (XFS_IS_CORRUPT(mp, i != 1))
528 return -EFSCORRUPTED;
532 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
533 struct xfs_btree_block *bnoblock;
534 struct xfs_btree_block *cntblock;
536 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
537 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
539 if (XFS_IS_CORRUPT(mp,
540 bnoblock->bb_numrecs !=
541 cntblock->bb_numrecs))
542 return -EFSCORRUPTED;
547 * Deal with all four cases: the allocated record is contained
548 * within the freespace record, so we can have new freespace
549 * at either (or both) end, or no freespace remaining.
551 if (rbno == fbno && rlen == flen)
552 nfbno1 = nfbno2 = NULLAGBLOCK;
553 else if (rbno == fbno) {
554 nfbno1 = rbno + rlen;
555 nflen1 = flen - rlen;
556 nfbno2 = NULLAGBLOCK;
557 } else if (rbno + rlen == fbno + flen) {
559 nflen1 = flen - rlen;
560 nfbno2 = NULLAGBLOCK;
563 nflen1 = rbno - fbno;
564 nfbno2 = rbno + rlen;
565 nflen2 = (fbno + flen) - nfbno2;
568 * Delete the entry from the by-size btree.
570 if ((error = xfs_btree_delete(cnt_cur, &i)))
572 if (XFS_IS_CORRUPT(mp, i != 1))
573 return -EFSCORRUPTED;
575 * Add new by-size btree entry(s).
577 if (nfbno1 != NULLAGBLOCK) {
578 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
580 if (XFS_IS_CORRUPT(mp, i != 0))
581 return -EFSCORRUPTED;
582 if ((error = xfs_btree_insert(cnt_cur, &i)))
584 if (XFS_IS_CORRUPT(mp, i != 1))
585 return -EFSCORRUPTED;
587 if (nfbno2 != NULLAGBLOCK) {
588 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
590 if (XFS_IS_CORRUPT(mp, i != 0))
591 return -EFSCORRUPTED;
592 if ((error = xfs_btree_insert(cnt_cur, &i)))
594 if (XFS_IS_CORRUPT(mp, i != 1))
595 return -EFSCORRUPTED;
598 * Fix up the by-block btree entry(s).
600 if (nfbno1 == NULLAGBLOCK) {
602 * No remaining freespace, just delete the by-block tree entry.
604 if ((error = xfs_btree_delete(bno_cur, &i)))
606 if (XFS_IS_CORRUPT(mp, i != 1))
607 return -EFSCORRUPTED;
610 * Update the by-block entry to start later|be shorter.
612 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
615 if (nfbno2 != NULLAGBLOCK) {
617 * 2 resulting free entries, need to add one.
619 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
621 if (XFS_IS_CORRUPT(mp, i != 0))
622 return -EFSCORRUPTED;
623 if ((error = xfs_btree_insert(bno_cur, &i)))
625 if (XFS_IS_CORRUPT(mp, i != 1))
626 return -EFSCORRUPTED;
632 * We do not verify the AGFL contents against AGF-based index counters here,
633 * even though we may have access to the perag that contains shadow copies. We
634 * don't know if the AGF based counters have been checked, and if they have they
635 * still may be inconsistent because they haven't yet been reset on the first
636 * allocation after the AGF has been read in.
638 * This means we can only check that all agfl entries contain valid or null
639 * values because we can't reliably determine the active range to exclude
640 * NULLAGBNO as a valid value.
642 * However, we can't even do that for v4 format filesystems because there are
643 * old versions of mkfs out there that does not initialise the AGFL to known,
644 * verifiable values. HEnce we can't tell the difference between a AGFL block
645 * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
647 * As a result, we can only fully validate AGFL block numbers when we pull them
648 * from the freelist in xfs_alloc_get_freelist().
650 static xfs_failaddr_t
654 struct xfs_mount *mp = bp->b_mount;
655 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
656 __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp);
659 if (!xfs_has_crc(mp))
662 if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
663 return __this_address;
664 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
665 return __this_address;
667 * during growfs operations, the perag is not fully initialised,
668 * so we can't use it for any useful checking. growfs ensures we can't
669 * use it by using uncached buffers that don't have the perag attached
670 * so we can detect and avoid this problem.
672 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
673 return __this_address;
675 for (i = 0; i < xfs_agfl_size(mp); i++) {
676 if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
677 be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
678 return __this_address;
681 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
682 return __this_address;
687 xfs_agfl_read_verify(
690 struct xfs_mount *mp = bp->b_mount;
694 * There is no verification of non-crc AGFLs because mkfs does not
695 * initialise the AGFL to zero or NULL. Hence the only valid part of the
696 * AGFL is what the AGF says is active. We can't get to the AGF, so we
697 * can't verify just those entries are valid.
699 if (!xfs_has_crc(mp))
702 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
703 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
705 fa = xfs_agfl_verify(bp);
707 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
712 xfs_agfl_write_verify(
715 struct xfs_mount *mp = bp->b_mount;
716 struct xfs_buf_log_item *bip = bp->b_log_item;
719 /* no verification of non-crc AGFLs */
720 if (!xfs_has_crc(mp))
723 fa = xfs_agfl_verify(bp);
725 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
730 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
732 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
735 const struct xfs_buf_ops xfs_agfl_buf_ops = {
737 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
738 .verify_read = xfs_agfl_read_verify,
739 .verify_write = xfs_agfl_write_verify,
740 .verify_struct = xfs_agfl_verify,
744 * Read in the allocation group free block array.
748 struct xfs_perag *pag,
749 struct xfs_trans *tp,
750 struct xfs_buf **bpp)
752 struct xfs_mount *mp = pag->pag_mount;
756 error = xfs_trans_read_buf(
757 mp, tp, mp->m_ddev_targp,
758 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
759 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
762 xfs_buf_set_ref(bp, XFS_AGFL_REF);
768 xfs_alloc_update_counters(
769 struct xfs_trans *tp,
770 struct xfs_buf *agbp,
773 struct xfs_agf *agf = agbp->b_addr;
775 agbp->b_pag->pagf_freeblks += len;
776 be32_add_cpu(&agf->agf_freeblks, len);
778 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
779 be32_to_cpu(agf->agf_length))) {
780 xfs_buf_mark_corrupt(agbp);
781 return -EFSCORRUPTED;
784 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
789 * Block allocation algorithm and data structures.
791 struct xfs_alloc_cur {
792 struct xfs_btree_cur *cnt; /* btree cursors */
793 struct xfs_btree_cur *bnolt;
794 struct xfs_btree_cur *bnogt;
795 xfs_extlen_t cur_len;/* current search length */
796 xfs_agblock_t rec_bno;/* extent startblock */
797 xfs_extlen_t rec_len;/* extent length */
798 xfs_agblock_t bno; /* alloc bno */
799 xfs_extlen_t len; /* alloc len */
800 xfs_extlen_t diff; /* diff from search bno */
801 unsigned int busy_gen;/* busy state */
806 * Set up cursors, etc. in the extent allocation cursor. This function can be
807 * called multiple times to reset an initialized structure without having to
808 * reallocate cursors.
812 struct xfs_alloc_arg *args,
813 struct xfs_alloc_cur *acur)
818 acur->cur_len = args->maxlen;
828 * Perform an initial cntbt lookup to check for availability of maxlen
829 * extents. If this fails, we'll return -ENOSPC to signal the caller to
830 * attempt a small allocation.
833 acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
834 args->agbp, args->pag, XFS_BTNUM_CNT);
835 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
840 * Allocate the bnobt left and right search cursors.
843 acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
844 args->agbp, args->pag, XFS_BTNUM_BNO);
846 acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
847 args->agbp, args->pag, XFS_BTNUM_BNO);
848 return i == 1 ? 0 : -ENOSPC;
853 struct xfs_alloc_cur *acur,
856 int cur_error = XFS_BTREE_NOERROR;
859 cur_error = XFS_BTREE_ERROR;
862 xfs_btree_del_cursor(acur->cnt, cur_error);
864 xfs_btree_del_cursor(acur->bnolt, cur_error);
866 xfs_btree_del_cursor(acur->bnogt, cur_error);
867 acur->cnt = acur->bnolt = acur->bnogt = NULL;
871 * Check an extent for allocation and track the best available candidate in the
872 * allocation structure. The cursor is deactivated if it has entered an out of
873 * range state based on allocation arguments. Optionally return the extent
874 * extent geometry and allocation status if requested by the caller.
878 struct xfs_alloc_arg *args,
879 struct xfs_alloc_cur *acur,
880 struct xfs_btree_cur *cur,
884 xfs_agblock_t bno, bnoa, bnew;
885 xfs_extlen_t len, lena, diff = -1;
887 unsigned busy_gen = 0;
888 bool deactivate = false;
889 bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
893 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
896 if (XFS_IS_CORRUPT(args->mp, i != 1))
897 return -EFSCORRUPTED;
900 * Check minlen and deactivate a cntbt cursor if out of acceptable size
901 * range (i.e., walking backwards looking for a minlen extent).
903 if (len < args->minlen) {
904 deactivate = !isbnobt;
908 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
912 acur->busy_gen = busy_gen;
913 /* deactivate a bnobt cursor outside of locality range */
914 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
915 deactivate = isbnobt;
918 if (lena < args->minlen)
921 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
922 xfs_alloc_fix_len(args);
923 ASSERT(args->len >= args->minlen);
924 if (args->len < acur->len)
928 * We have an aligned record that satisfies minlen and beats or matches
929 * the candidate extent size. Compare locality for near allocation mode.
931 diff = xfs_alloc_compute_diff(args->agbno, args->len,
932 args->alignment, args->datatype,
934 if (bnew == NULLAGBLOCK)
938 * Deactivate a bnobt cursor with worse locality than the current best.
940 if (diff > acur->diff) {
941 deactivate = isbnobt;
945 ASSERT(args->len > acur->len ||
946 (args->len == acur->len && diff <= acur->diff));
950 acur->len = args->len;
955 * We're done if we found a perfect allocation. This only deactivates
956 * the current cursor, but this is just an optimization to terminate a
957 * cntbt search that otherwise runs to the edge of the tree.
959 if (acur->diff == 0 && acur->len == args->maxlen)
963 cur->bc_ag.abt.active = false;
964 trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
970 * Complete an allocation of a candidate extent. Remove the extent from both
971 * trees and update the args structure.
974 xfs_alloc_cur_finish(
975 struct xfs_alloc_arg *args,
976 struct xfs_alloc_cur *acur)
978 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
981 ASSERT(acur->cnt && acur->bnolt);
982 ASSERT(acur->bno >= acur->rec_bno);
983 ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
984 ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
986 error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
987 acur->rec_len, acur->bno, acur->len, 0);
991 args->agbno = acur->bno;
992 args->len = acur->len;
995 trace_xfs_alloc_cur(args);
1000 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
1001 * bno optimized lookup to search for extents with ideal size and locality.
1004 xfs_alloc_cntbt_iter(
1005 struct xfs_alloc_arg *args,
1006 struct xfs_alloc_cur *acur)
1008 struct xfs_btree_cur *cur = acur->cnt;
1010 xfs_extlen_t len, cur_len;
1014 if (!xfs_alloc_cur_active(cur))
1017 /* locality optimized lookup */
1018 cur_len = acur->cur_len;
1019 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
1024 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1028 /* check the current record and update search length from it */
1029 error = xfs_alloc_cur_check(args, acur, cur, &i);
1032 ASSERT(len >= acur->cur_len);
1033 acur->cur_len = len;
1036 * We looked up the first record >= [agbno, len] above. The agbno is a
1037 * secondary key and so the current record may lie just before or after
1038 * agbno. If it is past agbno, check the previous record too so long as
1039 * the length matches as it may be closer. Don't check a smaller record
1040 * because that could deactivate our cursor.
1042 if (bno > args->agbno) {
1043 error = xfs_btree_decrement(cur, 0, &i);
1045 error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1046 if (!error && i && len == acur->cur_len)
1047 error = xfs_alloc_cur_check(args, acur, cur,
1055 * Increment the search key until we find at least one allocation
1056 * candidate or if the extent we found was larger. Otherwise, double the
1057 * search key to optimize the search. Efficiency is more important here
1058 * than absolute best locality.
1061 if (!acur->len || acur->cur_len >= cur_len)
1064 acur->cur_len = cur_len;
1070 * Deal with the case where only small freespaces remain. Either return the
1071 * contents of the last freespace record, or allocate space from the freelist if
1072 * there is nothing in the tree.
1074 STATIC int /* error */
1075 xfs_alloc_ag_vextent_small(
1076 struct xfs_alloc_arg *args, /* allocation argument structure */
1077 struct xfs_btree_cur *ccur, /* optional by-size cursor */
1078 xfs_agblock_t *fbnop, /* result block number */
1079 xfs_extlen_t *flenp, /* result length */
1080 int *stat) /* status: 0-freelist, 1-normal/none */
1082 struct xfs_agf *agf = args->agbp->b_addr;
1084 xfs_agblock_t fbno = NULLAGBLOCK;
1085 xfs_extlen_t flen = 0;
1089 * If a cntbt cursor is provided, try to allocate the largest record in
1090 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1091 * allocation. Make sure to respect minleft even when pulling from the
1095 error = xfs_btree_decrement(ccur, 0, &i);
1099 error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1102 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1103 error = -EFSCORRUPTED;
1109 if (args->minlen != 1 || args->alignment != 1 ||
1110 args->resv == XFS_AG_RESV_AGFL ||
1111 be32_to_cpu(agf->agf_flcount) <= args->minleft)
1114 error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
1118 if (fbno == NULLAGBLOCK)
1121 xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1122 (args->datatype & XFS_ALLOC_NOBUSY));
1124 if (args->datatype & XFS_ALLOC_USERDATA) {
1127 error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1128 XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1129 args->mp->m_bsize, 0, &bp);
1132 xfs_trans_binval(args->tp, bp);
1134 *fbnop = args->agbno = fbno;
1135 *flenp = args->len = 1;
1136 if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1137 error = -EFSCORRUPTED;
1140 args->wasfromfl = 1;
1141 trace_xfs_alloc_small_freelist(args);
1144 * If we're feeding an AGFL block to something that doesn't live in the
1145 * free space, we need to clear out the OWN_AG rmap.
1147 error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1148 &XFS_RMAP_OINFO_AG);
1157 * Can't do the allocation, give up.
1159 if (flen < args->minlen) {
1160 args->agbno = NULLAGBLOCK;
1161 trace_xfs_alloc_small_notenough(args);
1167 trace_xfs_alloc_small_done(args);
1171 trace_xfs_alloc_small_error(args);
1176 * Allocate a variable extent at exactly agno/bno.
1177 * Extent's length (returned in *len) will be between minlen and maxlen,
1178 * and of the form k * prod + mod unless there's nothing that large.
1179 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1181 STATIC int /* error */
1182 xfs_alloc_ag_vextent_exact(
1183 xfs_alloc_arg_t *args) /* allocation argument structure */
1185 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1186 struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1187 struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1189 xfs_agblock_t fbno; /* start block of found extent */
1190 xfs_extlen_t flen; /* length of found extent */
1191 xfs_agblock_t tbno; /* start block of busy extent */
1192 xfs_extlen_t tlen; /* length of busy extent */
1193 xfs_agblock_t tend; /* end block of busy extent */
1194 int i; /* success/failure of operation */
1197 ASSERT(args->alignment == 1);
1200 * Allocate/initialize a cursor for the by-number freespace btree.
1202 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1203 args->pag, XFS_BTNUM_BNO);
1206 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1207 * Look for the closest free block <= bno, it must contain bno
1208 * if any free block does.
1210 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1217 * Grab the freespace record.
1219 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1222 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1223 error = -EFSCORRUPTED;
1226 ASSERT(fbno <= args->agbno);
1229 * Check for overlapping busy extents.
1233 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1236 * Give up if the start of the extent is busy, or the freespace isn't
1237 * long enough for the minimum request.
1239 if (tbno > args->agbno)
1241 if (tlen < args->minlen)
1244 if (tend < args->agbno + args->minlen)
1248 * End of extent will be smaller of the freespace end and the
1249 * maximal requested end.
1251 * Fix the length according to mod and prod if given.
1253 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1255 xfs_alloc_fix_len(args);
1256 ASSERT(args->agbno + args->len <= tend);
1259 * We are allocating agbno for args->len
1260 * Allocate/initialize a cursor for the by-size btree.
1262 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1263 args->pag, XFS_BTNUM_CNT);
1264 ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
1265 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1266 args->len, XFSA_FIXUP_BNO_OK);
1268 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1272 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1273 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1275 args->wasfromfl = 0;
1276 trace_xfs_alloc_exact_done(args);
1280 /* Didn't find it, return null. */
1281 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1282 args->agbno = NULLAGBLOCK;
1283 trace_xfs_alloc_exact_notfound(args);
1287 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1288 trace_xfs_alloc_exact_error(args);
1293 * Search a given number of btree records in a given direction. Check each
1294 * record against the good extent we've already found.
1297 xfs_alloc_walk_iter(
1298 struct xfs_alloc_arg *args,
1299 struct xfs_alloc_cur *acur,
1300 struct xfs_btree_cur *cur,
1302 bool find_one, /* quit on first candidate */
1303 int count, /* rec count (-1 for infinite) */
1312 * Search so long as the cursor is active or we find a better extent.
1313 * The cursor is deactivated if it extends beyond the range of the
1314 * current allocation candidate.
1316 while (xfs_alloc_cur_active(cur) && count) {
1317 error = xfs_alloc_cur_check(args, acur, cur, &i);
1325 if (!xfs_alloc_cur_active(cur))
1329 error = xfs_btree_increment(cur, 0, &i);
1331 error = xfs_btree_decrement(cur, 0, &i);
1335 cur->bc_ag.abt.active = false;
1345 * Search the by-bno and by-size btrees in parallel in search of an extent with
1346 * ideal locality based on the NEAR mode ->agbno locality hint.
1349 xfs_alloc_ag_vextent_locality(
1350 struct xfs_alloc_arg *args,
1351 struct xfs_alloc_cur *acur,
1354 struct xfs_btree_cur *fbcur = NULL;
1359 ASSERT(acur->len == 0);
1363 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1366 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1369 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1374 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1375 * right and lookup the closest extent to the locality hint for each
1376 * extent size key in the cntbt. The entire search terminates
1377 * immediately on a bnobt hit because that means we've found best case
1378 * locality. Otherwise the search continues until the cntbt cursor runs
1379 * off the end of the tree. If no allocation candidate is found at this
1380 * point, give up on locality, walk backwards from the end of the cntbt
1381 * and take the first available extent.
1383 * The parallel tree searches balance each other out to provide fairly
1384 * consistent performance for various situations. The bnobt search can
1385 * have pathological behavior in the worst case scenario of larger
1386 * allocation requests and fragmented free space. On the other hand, the
1387 * bnobt is able to satisfy most smaller allocation requests much more
1388 * quickly than the cntbt. The cntbt search can sift through fragmented
1389 * free space and sets of free extents for larger allocation requests
1390 * more quickly than the bnobt. Since the locality hint is just a hint
1391 * and we don't want to scan the entire bnobt for perfect locality, the
1392 * cntbt search essentially bounds the bnobt search such that we can
1393 * find good enough locality at reasonable performance in most cases.
1395 while (xfs_alloc_cur_active(acur->bnolt) ||
1396 xfs_alloc_cur_active(acur->bnogt) ||
1397 xfs_alloc_cur_active(acur->cnt)) {
1399 trace_xfs_alloc_cur_lookup(args);
1402 * Search the bnobt left and right. In the case of a hit, finish
1403 * the search in the opposite direction and we're done.
1405 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1410 trace_xfs_alloc_cur_left(args);
1411 fbcur = acur->bnogt;
1415 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1420 trace_xfs_alloc_cur_right(args);
1421 fbcur = acur->bnolt;
1427 * Check the extent with best locality based on the current
1428 * extent size search key and keep track of the best candidate.
1430 error = xfs_alloc_cntbt_iter(args, acur);
1433 if (!xfs_alloc_cur_active(acur->cnt)) {
1434 trace_xfs_alloc_cur_lookup_done(args);
1440 * If we failed to find anything due to busy extents, return empty
1441 * handed so the caller can flush and retry. If no busy extents were
1442 * found, walk backwards from the end of the cntbt as a last resort.
1444 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1445 error = xfs_btree_decrement(acur->cnt, 0, &i);
1449 acur->cnt->bc_ag.abt.active = true;
1456 * Search in the opposite direction for a better entry in the case of
1457 * a bnobt hit or walk backwards from the end of the cntbt.
1460 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1472 /* Check the last block of the cnt btree for allocations. */
1474 xfs_alloc_ag_vextent_lastblock(
1475 struct xfs_alloc_arg *args,
1476 struct xfs_alloc_cur *acur,
1485 /* Randomly don't execute the first algorithm. */
1486 if (get_random_u32_below(2))
1491 * Start from the entry that lookup found, sequence through all larger
1492 * free blocks. If we're actually pointing at a record smaller than
1493 * maxlen, go to the start of this block, and skip all those smaller
1496 if (*len || args->alignment > 1) {
1497 acur->cnt->bc_levels[0].ptr = 1;
1499 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1502 if (XFS_IS_CORRUPT(args->mp, i != 1))
1503 return -EFSCORRUPTED;
1504 if (*len >= args->minlen)
1506 error = xfs_btree_increment(acur->cnt, 0, &i);
1510 ASSERT(*len >= args->minlen);
1515 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1520 * It didn't work. We COULD be in a case where there's a good record
1521 * somewhere, so try again.
1526 trace_xfs_alloc_near_first(args);
1532 * Allocate a variable extent near bno in the allocation group agno.
1533 * Extent's length (returned in len) will be between minlen and maxlen,
1534 * and of the form k * prod + mod unless there's nothing that large.
1535 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1538 xfs_alloc_ag_vextent_near(
1539 struct xfs_alloc_arg *args,
1540 uint32_t alloc_flags)
1542 struct xfs_alloc_cur acur = {};
1543 int error; /* error code */
1544 int i; /* result code, temporary */
1548 /* handle uninitialized agbno range so caller doesn't have to */
1549 if (!args->min_agbno && !args->max_agbno)
1550 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1551 ASSERT(args->min_agbno <= args->max_agbno);
1553 /* clamp agbno to the range if it's outside */
1554 if (args->agbno < args->min_agbno)
1555 args->agbno = args->min_agbno;
1556 if (args->agbno > args->max_agbno)
1557 args->agbno = args->max_agbno;
1559 /* Retry once quickly if we find busy extents before blocking. */
1560 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1565 * Set up cursors and see if there are any free extents as big as
1566 * maxlen. If not, pick the last entry in the tree unless the tree is
1569 error = xfs_alloc_cur_setup(args, &acur);
1570 if (error == -ENOSPC) {
1571 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1575 if (i == 0 || len == 0) {
1576 trace_xfs_alloc_near_noentry(args);
1586 * If the requested extent is large wrt the freespaces available
1587 * in this a.g., then the cursor will be pointing to a btree entry
1588 * near the right edge of the tree. If it's in the last btree leaf
1589 * block, then we just examine all the entries in that block
1590 * that are big enough, and pick the best one.
1592 if (xfs_btree_islastblock(acur.cnt, 0)) {
1593 bool allocated = false;
1595 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1604 * Second algorithm. Combined cntbt and bnobt search to find ideal
1607 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1612 * If we couldn't get anything, give up.
1617 * Our only valid extents must have been busy. Flush and
1618 * retry the allocation again. If we get an -EAGAIN
1619 * error, we're being told that a deadlock was avoided
1620 * and the current transaction needs committing before
1621 * the allocation can be retried.
1623 trace_xfs_alloc_near_busy(args);
1624 error = xfs_extent_busy_flush(args->tp, args->pag,
1625 acur.busy_gen, alloc_flags);
1629 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1632 trace_xfs_alloc_size_neither(args);
1633 args->agbno = NULLAGBLOCK;
1638 /* fix up btrees on a successful allocation */
1639 error = xfs_alloc_cur_finish(args, &acur);
1642 xfs_alloc_cur_close(&acur, error);
1647 * Allocate a variable extent anywhere in the allocation group agno.
1648 * Extent's length (returned in len) will be between minlen and maxlen,
1649 * and of the form k * prod + mod unless there's nothing that large.
1650 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1653 xfs_alloc_ag_vextent_size(
1654 struct xfs_alloc_arg *args,
1655 uint32_t alloc_flags)
1657 struct xfs_agf *agf = args->agbp->b_addr;
1658 struct xfs_btree_cur *bno_cur;
1659 struct xfs_btree_cur *cnt_cur;
1660 xfs_agblock_t fbno; /* start of found freespace */
1661 xfs_extlen_t flen; /* length of found freespace */
1662 xfs_agblock_t rbno; /* returned block number */
1663 xfs_extlen_t rlen; /* length of returned extent */
1669 /* Retry once quickly if we find busy extents before blocking. */
1670 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1673 * Allocate and initialize a cursor for the by-size btree.
1675 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1676 args->pag, XFS_BTNUM_CNT);
1680 * Look for an entry >= maxlen+alignment-1 blocks.
1682 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1683 args->maxlen + args->alignment - 1, &i)))
1687 * If none then we have to settle for a smaller extent. In the case that
1688 * there are no large extents, this will return the last entry in the
1689 * tree unless the tree is empty. In the case that there are only busy
1690 * large extents, this will return the largest small extent unless there
1691 * are no smaller extents available.
1694 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1698 if (i == 0 || flen == 0) {
1699 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1700 trace_xfs_alloc_size_noentry(args);
1704 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1708 * Search for a non-busy extent that is large enough.
1711 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1714 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1715 error = -EFSCORRUPTED;
1719 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1720 &rbno, &rlen, &busy_gen);
1722 if (rlen >= args->maxlen)
1725 error = xfs_btree_increment(cnt_cur, 0, &i);
1732 * Our only valid extents must have been busy. Flush and
1733 * retry the allocation again. If we get an -EAGAIN
1734 * error, we're being told that a deadlock was avoided
1735 * and the current transaction needs committing before
1736 * the allocation can be retried.
1738 trace_xfs_alloc_size_busy(args);
1739 error = xfs_extent_busy_flush(args->tp, args->pag,
1740 busy_gen, alloc_flags);
1744 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1745 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1751 * In the first case above, we got the last entry in the
1752 * by-size btree. Now we check to see if the space hits maxlen
1753 * once aligned; if not, we search left for something better.
1754 * This can't happen in the second case above.
1756 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1757 if (XFS_IS_CORRUPT(args->mp,
1760 rbno + rlen > fbno + flen))) {
1761 error = -EFSCORRUPTED;
1764 if (rlen < args->maxlen) {
1765 xfs_agblock_t bestfbno;
1766 xfs_extlen_t bestflen;
1767 xfs_agblock_t bestrbno;
1768 xfs_extlen_t bestrlen;
1775 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1779 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1782 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1783 error = -EFSCORRUPTED;
1786 if (flen < bestrlen)
1788 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1789 &rbno, &rlen, &busy_gen);
1790 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1791 if (XFS_IS_CORRUPT(args->mp,
1794 rbno + rlen > fbno + flen))) {
1795 error = -EFSCORRUPTED;
1798 if (rlen > bestrlen) {
1803 if (rlen == args->maxlen)
1807 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1810 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1811 error = -EFSCORRUPTED;
1819 args->wasfromfl = 0;
1821 * Fix up the length.
1824 if (rlen < args->minlen) {
1827 * Our only valid extents must have been busy. Flush and
1828 * retry the allocation again. If we get an -EAGAIN
1829 * error, we're being told that a deadlock was avoided
1830 * and the current transaction needs committing before
1831 * the allocation can be retried.
1833 trace_xfs_alloc_size_busy(args);
1834 error = xfs_extent_busy_flush(args->tp, args->pag,
1835 busy_gen, alloc_flags);
1839 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1840 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1845 xfs_alloc_fix_len(args);
1848 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1849 error = -EFSCORRUPTED;
1853 * Allocate and initialize a cursor for the by-block tree.
1855 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1856 args->pag, XFS_BTNUM_BNO);
1857 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1858 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1860 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1861 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1862 cnt_cur = bno_cur = NULL;
1865 if (XFS_IS_CORRUPT(args->mp,
1866 args->agbno + args->len >
1867 be32_to_cpu(agf->agf_length))) {
1868 error = -EFSCORRUPTED;
1871 trace_xfs_alloc_size_done(args);
1875 trace_xfs_alloc_size_error(args);
1877 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1879 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1883 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1884 trace_xfs_alloc_size_nominleft(args);
1885 args->agbno = NULLAGBLOCK;
1890 * Free the extent starting at agno/bno for length.
1894 struct xfs_trans *tp,
1895 struct xfs_buf *agbp,
1896 xfs_agnumber_t agno,
1899 const struct xfs_owner_info *oinfo,
1900 enum xfs_ag_resv_type type)
1902 struct xfs_mount *mp;
1903 struct xfs_btree_cur *bno_cur;
1904 struct xfs_btree_cur *cnt_cur;
1905 xfs_agblock_t gtbno; /* start of right neighbor */
1906 xfs_extlen_t gtlen; /* length of right neighbor */
1907 xfs_agblock_t ltbno; /* start of left neighbor */
1908 xfs_extlen_t ltlen; /* length of left neighbor */
1909 xfs_agblock_t nbno; /* new starting block of freesp */
1910 xfs_extlen_t nlen; /* new length of freespace */
1911 int haveleft; /* have a left neighbor */
1912 int haveright; /* have a right neighbor */
1915 struct xfs_perag *pag = agbp->b_pag;
1917 bno_cur = cnt_cur = NULL;
1920 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1921 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
1927 * Allocate and initialize a cursor for the by-block btree.
1929 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
1931 * Look for a neighboring block on the left (lower block numbers)
1932 * that is contiguous with this space.
1934 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1938 * There is a block to our left.
1940 if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i)))
1942 if (XFS_IS_CORRUPT(mp, i != 1)) {
1943 error = -EFSCORRUPTED;
1947 * It's not contiguous, though.
1949 if (ltbno + ltlen < bno)
1953 * If this failure happens the request to free this
1954 * space was invalid, it's (partly) already free.
1957 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
1958 error = -EFSCORRUPTED;
1964 * Look for a neighboring block on the right (higher block numbers)
1965 * that is contiguous with this space.
1967 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1971 * There is a block to our right.
1973 if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i)))
1975 if (XFS_IS_CORRUPT(mp, i != 1)) {
1976 error = -EFSCORRUPTED;
1980 * It's not contiguous, though.
1982 if (bno + len < gtbno)
1986 * If this failure happens the request to free this
1987 * space was invalid, it's (partly) already free.
1990 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
1991 error = -EFSCORRUPTED;
1997 * Now allocate and initialize a cursor for the by-size tree.
1999 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
2001 * Have both left and right contiguous neighbors.
2002 * Merge all three into a single free block.
2004 if (haveleft && haveright) {
2006 * Delete the old by-size entry on the left.
2008 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2010 if (XFS_IS_CORRUPT(mp, i != 1)) {
2011 error = -EFSCORRUPTED;
2014 if ((error = xfs_btree_delete(cnt_cur, &i)))
2016 if (XFS_IS_CORRUPT(mp, i != 1)) {
2017 error = -EFSCORRUPTED;
2021 * Delete the old by-size entry on the right.
2023 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2025 if (XFS_IS_CORRUPT(mp, i != 1)) {
2026 error = -EFSCORRUPTED;
2029 if ((error = xfs_btree_delete(cnt_cur, &i)))
2031 if (XFS_IS_CORRUPT(mp, i != 1)) {
2032 error = -EFSCORRUPTED;
2036 * Delete the old by-block entry for the right block.
2038 if ((error = xfs_btree_delete(bno_cur, &i)))
2040 if (XFS_IS_CORRUPT(mp, i != 1)) {
2041 error = -EFSCORRUPTED;
2045 * Move the by-block cursor back to the left neighbor.
2047 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2049 if (XFS_IS_CORRUPT(mp, i != 1)) {
2050 error = -EFSCORRUPTED;
2055 * Check that this is the right record: delete didn't
2056 * mangle the cursor.
2059 xfs_agblock_t xxbno;
2062 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2065 if (XFS_IS_CORRUPT(mp,
2069 error = -EFSCORRUPTED;
2075 * Update remaining by-block entry to the new, joined block.
2078 nlen = len + ltlen + gtlen;
2079 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2083 * Have only a left contiguous neighbor.
2084 * Merge it together with the new freespace.
2086 else if (haveleft) {
2088 * Delete the old by-size entry on the left.
2090 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2092 if (XFS_IS_CORRUPT(mp, i != 1)) {
2093 error = -EFSCORRUPTED;
2096 if ((error = xfs_btree_delete(cnt_cur, &i)))
2098 if (XFS_IS_CORRUPT(mp, i != 1)) {
2099 error = -EFSCORRUPTED;
2103 * Back up the by-block cursor to the left neighbor, and
2104 * update its length.
2106 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2108 if (XFS_IS_CORRUPT(mp, i != 1)) {
2109 error = -EFSCORRUPTED;
2114 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2118 * Have only a right contiguous neighbor.
2119 * Merge it together with the new freespace.
2121 else if (haveright) {
2123 * Delete the old by-size entry on the right.
2125 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2127 if (XFS_IS_CORRUPT(mp, i != 1)) {
2128 error = -EFSCORRUPTED;
2131 if ((error = xfs_btree_delete(cnt_cur, &i)))
2133 if (XFS_IS_CORRUPT(mp, i != 1)) {
2134 error = -EFSCORRUPTED;
2138 * Update the starting block and length of the right
2139 * neighbor in the by-block tree.
2143 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2147 * No contiguous neighbors.
2148 * Insert the new freespace into the by-block tree.
2153 if ((error = xfs_btree_insert(bno_cur, &i)))
2155 if (XFS_IS_CORRUPT(mp, i != 1)) {
2156 error = -EFSCORRUPTED;
2160 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2163 * In all cases we need to insert the new freespace in the by-size tree.
2165 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2167 if (XFS_IS_CORRUPT(mp, i != 0)) {
2168 error = -EFSCORRUPTED;
2171 if ((error = xfs_btree_insert(cnt_cur, &i)))
2173 if (XFS_IS_CORRUPT(mp, i != 1)) {
2174 error = -EFSCORRUPTED;
2177 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2181 * Update the freespace totals in the ag and superblock.
2183 error = xfs_alloc_update_counters(tp, agbp, len);
2184 xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2188 XFS_STATS_INC(mp, xs_freex);
2189 XFS_STATS_ADD(mp, xs_freeb, len);
2191 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2196 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2198 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2200 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2205 * Visible (exported) allocation/free functions.
2206 * Some of these are used just by xfs_alloc_btree.c and this file.
2210 * Compute and fill in value of m_alloc_maxlevels.
2213 xfs_alloc_compute_maxlevels(
2214 xfs_mount_t *mp) /* file system mount structure */
2216 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2217 (mp->m_sb.sb_agblocks + 1) / 2);
2218 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2222 * Find the length of the longest extent in an AG. The 'need' parameter
2223 * specifies how much space we're going to need for the AGFL and the
2224 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2228 xfs_alloc_longest_free_extent(
2229 struct xfs_perag *pag,
2231 xfs_extlen_t reserved)
2233 xfs_extlen_t delta = 0;
2236 * If the AGFL needs a recharge, we'll have to subtract that from the
2239 if (need > pag->pagf_flcount)
2240 delta = need - pag->pagf_flcount;
2243 * If we cannot maintain others' reservations with space from the
2244 * not-longest freesp extents, we'll have to subtract /that/ from
2245 * the longest extent too.
2247 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2248 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2251 * If the longest extent is long enough to satisfy all the
2252 * reservations and AGFL rules in place, we can return this extent.
2254 if (pag->pagf_longest > delta)
2255 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2256 pag->pagf_longest - delta);
2258 /* Otherwise, let the caller try for 1 block if there's space. */
2259 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2263 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2264 * return the largest possible minimum length.
2267 xfs_alloc_min_freelist(
2268 struct xfs_mount *mp,
2269 struct xfs_perag *pag)
2271 /* AG btrees have at least 1 level. */
2272 static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2273 const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
2274 unsigned int min_free;
2276 ASSERT(mp->m_alloc_maxlevels > 0);
2279 * For a btree shorter than the maximum height, the worst case is that
2280 * every level gets split and a new level is added, then while inserting
2281 * another entry to refill the AGFL, every level under the old root gets
2282 * split again. This is:
2284 * (full height split reservation) + (AGFL refill split height)
2285 * = (current height + 1) + (current height - 1)
2286 * = (new height) + (new height - 2)
2287 * = 2 * new height - 2
2289 * For a btree of maximum height, the worst case is that every level
2290 * under the root gets split, then while inserting another entry to
2291 * refill the AGFL, every level under the root gets split again. This is
2294 * 2 * (current height - 1)
2295 * = 2 * (new height - 1)
2296 * = 2 * new height - 2
2299 /* space needed by-bno freespace btree */
2300 min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2301 mp->m_alloc_maxlevels) * 2 - 2;
2302 /* space needed by-size freespace btree */
2303 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2304 mp->m_alloc_maxlevels) * 2 - 2;
2305 /* space needed reverse mapping used space btree */
2306 if (xfs_has_rmapbt(mp))
2307 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2308 mp->m_rmap_maxlevels) * 2 - 2;
2314 * Check if the operation we are fixing up the freelist for should go ahead or
2315 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2316 * is dependent on whether the size and shape of free space available will
2317 * permit the requested allocation to take place.
2320 xfs_alloc_space_available(
2321 struct xfs_alloc_arg *args,
2322 xfs_extlen_t min_free,
2325 struct xfs_perag *pag = args->pag;
2326 xfs_extlen_t alloc_len, longest;
2327 xfs_extlen_t reservation; /* blocks that are still reserved */
2329 xfs_extlen_t agflcount;
2331 if (flags & XFS_ALLOC_FLAG_FREEING)
2334 reservation = xfs_ag_resv_needed(pag, args->resv);
2336 /* do we have enough contiguous free space for the allocation? */
2337 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2338 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2339 if (longest < alloc_len)
2343 * Do we have enough free space remaining for the allocation? Don't
2344 * account extra agfl blocks because we are about to defer free them,
2345 * making them unavailable until the current transaction commits.
2347 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2348 available = (int)(pag->pagf_freeblks + agflcount -
2349 reservation - min_free - args->minleft);
2350 if (available < (int)max(args->total, alloc_len))
2354 * Clamp maxlen to the amount of free space available for the actual
2355 * extent allocation.
2357 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2358 args->maxlen = available;
2359 ASSERT(args->maxlen > 0);
2360 		ASSERT(args->maxlen >= args->minlen);
2361 	}
2363 	return true;
2364 }
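/*
 * Editor's illustrative sketch, not part of the original source: the
 * arithmetic of the availability check above with sample numbers. With 100
 * free blocks, 4 usable AGFL blocks, a 10 block reservation, min_free of 8
 * and minleft of 2: available = 100 + 4 - 10 - 8 - 2 = 84, so any request
 * with max(total, alloc_len) <= 84 passes. The helper name is hypothetical.
 */
static inline int
example_available_space(
	xfs_extlen_t		freeblks,
	xfs_extlen_t		agflcount,
	xfs_extlen_t		reservation,
	xfs_extlen_t		min_free,
	xfs_extlen_t		minleft)
{
	/* Signed: the result can legitimately go negative in a full AG. */
	return (int)(freeblks + agflcount - reservation - min_free - minleft);
}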
2367 xfs_free_agfl_block(
2368 struct xfs_trans *tp,
2369 xfs_agnumber_t agno,
2370 xfs_agblock_t agbno,
2371 struct xfs_buf *agbp,
2372 struct xfs_owner_info *oinfo)
2377 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2378 			XFS_AG_RESV_AGFL);
2379 	if (error)
2380 		return error;
2382 error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2383 XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2384 tp->t_mountp->m_bsize, 0, &bp);
2385 	if (error)
2386 		return error;
2387 xfs_trans_binval(tp, bp);
2389 	return 0;
2390 }
2393 * Check the agfl fields of the agf for inconsistency or corruption.
2395 * The original purpose was to detect an agfl header padding mismatch between
2396 * current and early v5 kernels. This problem manifests as a 1-slot size
2397 * difference between the on-disk flcount and the active [first, last] range of
2398 * a wrapped AGFL.
2400 * However, we need to use these same checks to catch agfl count corruptions
2401 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2402 * way, we need to reset the agfl and warn the user.
2404 * Return true if a reset is required before the agfl can be used, false
2405 * otherwise.
2407 static bool
2408 xfs_agfl_needs_reset(
2409 struct xfs_mount *mp,
2410 struct xfs_agf *agf)
2412 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2413 uint32_t l = be32_to_cpu(agf->agf_fllast);
2414 uint32_t c = be32_to_cpu(agf->agf_flcount);
2415 int agfl_size = xfs_agfl_size(mp);
2416 	int active;
2419 * The agf read verifier catches severe corruption of these fields.
2420 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2421 * the verifier allows it.
2423 if (f >= agfl_size || l >= agfl_size)
2429 * Check consistency between the on-disk count and the active range. An
2430 * agfl padding mismatch manifests as an inconsistent flcount.
2432 	if (c && l >= f)
2433 		active = l - f + 1;
2434 	else if (c)
2435 		active = agfl_size - f + l + 1;
2436 	else
2437 		active = 0;
2439 	return active != c;
2440 }
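/*
 * Editor's illustrative sketch, not part of the original source: the
 * active-range arithmetic above for a non-empty AGFL. With agfl_size = 119,
 * first = 117 and last = 1, the active range wraps and covers slots
 * 117, 118, 0, 1, so active = 119 - 117 + 1 + 1 = 4; any on-disk flcount
 * other than 4 then forces a reset. The helper name is hypothetical and it
 * assumes flcount > 0.
 */
static inline uint32_t
example_agfl_active_slots(
	uint32_t		agfl_size,
	uint32_t		first,
	uint32_t		last)
{
	if (last >= first)			/* not wrapped */
		return last - first + 1;
	return agfl_size - first + last + 1;	/* wrapped */
}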
2443 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2444 * agfl content cannot be trusted. Warn the user that a repair is required to
2445 * recover leaked blocks.
2447 * The purpose of this mechanism is to handle filesystems affected by the agfl
2448 * header padding mismatch problem. A reset keeps the filesystem online with a
2449 * relatively minor free space accounting inconsistency rather than suffer the
2450 * inevitable crash from use of an invalid agfl block.
2452 static void
2453 xfs_agfl_reset(
2454 struct xfs_trans *tp,
2455 struct xfs_buf *agbp,
2456 struct xfs_perag *pag)
2458 struct xfs_mount *mp = tp->t_mountp;
2459 struct xfs_agf *agf = agbp->b_addr;
2461 ASSERT(xfs_perag_agfl_needs_reset(pag));
2462 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2464 	xfs_warn(mp,
2465 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2466 "Please unmount and run xfs_repair.",
2467 pag->pag_agno, pag->pagf_flcount);
2469 agf->agf_flfirst = 0;
2470 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2471 agf->agf_flcount = 0;
2472 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2473 				  XFS_AGF_FLCOUNT);
2475 pag->pagf_flcount = 0;
2476 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2480 * Defer an AGFL block free. This is effectively equivalent to
2481 * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2483 * Deferring AGFL frees helps prevent log reservation overruns due to too many
2484 * allocation operations in a transaction. AGFL frees are prone to this problem
2485 * because, for one, they are always freed one at a time. Further, an immediate
2486 * AGFL block free can cause a btree join and require another block free before
2487 * the real allocation can proceed. Deferring the free disconnects freeing up
2488 * the AGFL slot from freeing the block.
2491 xfs_defer_agfl_block(
2492 struct xfs_trans *tp,
2493 xfs_agnumber_t agno,
2494 xfs_agblock_t agbno,
2495 struct xfs_owner_info *oinfo)
2497 struct xfs_mount *mp = tp->t_mountp;
2498 struct xfs_extent_free_item *xefi;
2499 xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
2501 ASSERT(xfs_extfree_item_cache != NULL);
2502 ASSERT(oinfo != NULL);
2504 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
2505 return -EFSCORRUPTED;
2507 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2508 GFP_KERNEL | __GFP_NOFAIL);
2509 xefi->xefi_startblock = fsbno;
2510 xefi->xefi_blockcount = 1;
2511 xefi->xefi_owner = oinfo->oi_owner;
2512 xefi->xefi_agresv = XFS_AG_RESV_AGFL;
2514 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2516 xfs_extent_free_get_group(mp, xefi);
2517 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &xefi->xefi_list);
2518 	return 0;
2519 }
2522 * Add the extent to the list of extents to be free at transaction end.
2523 * The list is maintained sorted (by block number).
2526 __xfs_free_extent_later(
2527 struct xfs_trans *tp,
2528 	xfs_fsblock_t			bno,
2529 	xfs_filblks_t			len,
2530 const struct xfs_owner_info *oinfo,
2531 enum xfs_ag_resv_type type,
2532 	bool				skip_discard)
2534 struct xfs_extent_free_item *xefi;
2535 struct xfs_mount *mp = tp->t_mountp;
2537 xfs_agnumber_t agno;
2538 xfs_agblock_t agbno;
2540 ASSERT(bno != NULLFSBLOCK);
2542 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2543 ASSERT(!isnullstartblock(bno));
2544 agno = XFS_FSB_TO_AGNO(mp, bno);
2545 agbno = XFS_FSB_TO_AGBNO(mp, bno);
2546 ASSERT(agno < mp->m_sb.sb_agcount);
2547 ASSERT(agbno < mp->m_sb.sb_agblocks);
2548 ASSERT(len < mp->m_sb.sb_agblocks);
2549 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2551 ASSERT(xfs_extfree_item_cache != NULL);
2552 ASSERT(type != XFS_AG_RESV_AGFL);
2554 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2555 return -EFSCORRUPTED;
2557 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2558 GFP_KERNEL | __GFP_NOFAIL);
2559 xefi->xefi_startblock = bno;
2560 xefi->xefi_blockcount = (xfs_extlen_t)len;
2561 xefi->xefi_agresv = type;
2562 	if (skip_discard)
2563 		xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2564 	if (oinfo) {
2565 		ASSERT(oinfo->oi_offset == 0);
2567 		if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2568 			xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2569 		if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2570 			xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2571 		xefi->xefi_owner = oinfo->oi_owner;
2572 	} else {
2573 		xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2574 	}
2575 trace_xfs_bmap_free_defer(mp,
2576 XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2577 XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2579 xfs_extent_free_get_group(mp, xefi);
2580 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &xefi->xefi_list);
2581 	return 0;
2582 }
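/*
 * Editor's illustrative sketch, not part of the original source: deferring a
 * one-block extent free from a permanent transaction via the function above.
 * The helper name is hypothetical; the "any owner" owner info is an
 * illustrative choice, and fsbno is assumed to be a valid block the caller
 * owns.
 */
static inline int
example_defer_block_free(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsbno)
{
	/* Defer the free; the block stays busy until the intent is done. */
	return __xfs_free_extent_later(tp, fsbno, 1, &XFS_RMAP_OINFO_ANY_OWNER,
			XFS_AG_RESV_NONE, false);
}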
2586 * Check if an AGF has a free extent record whose length is equal to
2587 * args->minlen.
2589 STATIC int
2590 xfs_exact_minlen_extent_available(
2591 struct xfs_alloc_arg *args,
2592 struct xfs_buf *agbp,
2593 	int			*stat)
2595 struct xfs_btree_cur *cnt_cur;
2596 	xfs_agblock_t		fbno;
2597 	xfs_extlen_t		flen;
2598 	int			error = 0;
2600 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2601 args->pag, XFS_BTNUM_CNT);
2602 	error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2603 	if (error)
2604 		goto out;
2606 	if (*stat == 0) {
2607 		error = -EFSCORRUPTED;
2608 		goto out;
2609 	}
2611 	error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2612 	if (error)
2613 		goto out;
2615 	if (*stat == 1 && flen != args->minlen)
2616 		*stat = 0;
2618 out:
2619 	xfs_btree_del_cursor(cnt_cur, error);
2621 	return error;
2622 }
2626 * Decide whether to use this allocation group for this allocation.
2627 * If so, fix up the btree freelist's size.
2630 xfs_alloc_fix_freelist(
2631 struct xfs_alloc_arg *args, /* allocation argument structure */
2632 uint32_t alloc_flags)
2634 struct xfs_mount *mp = args->mp;
2635 struct xfs_perag *pag = args->pag;
2636 struct xfs_trans *tp = args->tp;
2637 struct xfs_buf *agbp = NULL;
2638 struct xfs_buf *agflbp = NULL;
2639 struct xfs_alloc_arg targs; /* local allocation arguments */
2640 xfs_agblock_t bno; /* freelist block */
2641 xfs_extlen_t need; /* total blocks needed in freelist */
2644 /* deferred ops (AGFL block frees) require permanent transactions */
2645 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2647 if (!xfs_perag_initialised_agf(pag)) {
2648 		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2649 		if (error) {
2650 			/* Couldn't lock the AGF so skip this AG. */
2651 			if (error == -EAGAIN)
2652 				error = 0;
2653 			goto out_no_agbp;
2654 		}
2655 	}
2658 * If this is a metadata preferred pag and we are user data then try
2659 * somewhere else if we are not being asked to try harder at this
2660 * point.
2662 if (xfs_perag_prefers_metadata(pag) &&
2663 (args->datatype & XFS_ALLOC_USERDATA) &&
2664 (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2665 ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2666 goto out_agbp_relse;
2669 need = xfs_alloc_min_freelist(mp, pag);
2670 if (!xfs_alloc_space_available(args, need, alloc_flags |
2671 XFS_ALLOC_FLAG_CHECK))
2672 goto out_agbp_relse;
2675 * Get the a.g. freespace buffer.
2676 * Can fail if we're not blocking on locks, and it's held.
2679 	error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2680 	if (error) {
2681 		/* Couldn't lock the AGF so skip this AG. */
2682 		if (error == -EAGAIN)
2683 			error = 0;
2684 		goto out_no_agbp;
2685 	}
2688 /* reset a padding mismatched agfl before final free space check */
2689 if (xfs_perag_agfl_needs_reset(pag))
2690 xfs_agfl_reset(tp, agbp, pag);
2692 	/* Reject if there isn't enough total space or a large enough single extent. */
2693 need = xfs_alloc_min_freelist(mp, pag);
2694 if (!xfs_alloc_space_available(args, need, alloc_flags))
2695 goto out_agbp_relse;
2698 if (args->alloc_minlen_only) {
2699 		int stat;
2701 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2702 		if (error || !stat)
2703 goto out_agbp_relse;
2707 * Make the freelist shorter if it's too long.
2709 * Note that from this point onwards, we will always release the agf and
2710 * agfl buffers on error. This handles the case where we error out and
2711 * the buffers are clean or may not have been joined to the transaction
2712 * and hence need to be released manually. If they have been joined to
2713 * the transaction, then xfs_trans_brelse() will handle them
2714 * appropriately based on the recursion count and dirty state of the
2715 * buffer.
2717 * XXX (dgc): When we have lots of free space, does this buy us
2718 * anything other than extra overhead when we need to put more blocks
2719 * back on the free list? Maybe we should only do this when space is
2720 * getting low or the AGFL is more than half full?
2722 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2723 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2724 * updating the rmapbt. Both flags are used in xfs_repair while we're
2725 * rebuilding the rmapbt, and neither are used by the kernel. They're
2726 * both required to ensure that rmaps are correctly recorded for the
2727 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2728 * repair/rmap.c in xfsprogs for details.
2730 memset(&targs, 0, sizeof(targs));
2731 /* struct copy below */
2732 if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2733 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2735 targs.oinfo = XFS_RMAP_OINFO_AG;
2736 while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2737 pag->pagf_flcount > need) {
2738 		error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2739 		if (error)
2740 			goto out_agbp_relse;
2742 		/* defer agfl frees */
2743 		error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2744 		if (error)
2745 			goto out_agbp_relse;
2746 	}
2748 	targs.tp = tp;
2749 	targs.mp = mp;
2750 	targs.agbp = agbp;
2751 targs.agno = args->agno;
2752 targs.alignment = targs.minlen = targs.prod = 1;
2754 	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2755 	if (error)
2756 		goto out_agbp_relse;
2758 /* Make the freelist longer if it's too short. */
2759 while (pag->pagf_flcount < need) {
2761 targs.maxlen = need - pag->pagf_flcount;
2762 targs.resv = XFS_AG_RESV_AGFL;
2764 /* Allocate as many blocks as possible at once. */
2765 		error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2766 		if (error)
2767 			goto out_agflbp_relse;
2770 * Stop if we run out. Won't happen if callers are obeying
2771 * the restrictions correctly. Can happen for free calls
2772 * on a completely full ag.
2774 if (targs.agbno == NULLAGBLOCK) {
2775 			if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2776 				break;
2777 			goto out_agflbp_relse;
2778 		}
2780 if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2781 			error = xfs_rmap_alloc(tp, agbp, pag,
2782 					targs.agbno, targs.len, &targs.oinfo);
2783 			if (error)
2784 				goto out_agflbp_relse;
2785 		}
2786 		error = xfs_alloc_update_counters(tp, agbp,
2787 				-((long)(targs.len)));
2788 		if (error)
2789 			goto out_agflbp_relse;
2792 * Put each allocated block on the list.
2794 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2795 			error = xfs_alloc_put_freelist(pag, tp, agbp,
2796 					agflbp, bno, 0);
2797 			if (error)
2798 				goto out_agflbp_relse;
2799 		}
2800 	}
2801 	xfs_trans_brelse(tp, agflbp);
2802 	args->agbp = agbp;
2803 	return 0;
2805 out_agflbp_relse:
2806 	xfs_trans_brelse(tp, agflbp);
2807 out_agbp_relse:
2808 	if (agbp)
2809 		xfs_trans_brelse(tp, agbp);
2810 out_no_agbp:
2811 	args->agbp = NULL;
2812 	return error;
2813 }
2816 * Get a block from the freelist.
2817 * Returns with the buffer for the block gotten.
2820 xfs_alloc_get_freelist(
2821 struct xfs_perag *pag,
2822 struct xfs_trans *tp,
2823 struct xfs_buf *agbp,
2824 xfs_agblock_t *bnop,
2825 	int			btreeblk)
2827 struct xfs_agf *agf = agbp->b_addr;
2828 struct xfs_buf *agflbp;
2833 struct xfs_mount *mp = tp->t_mountp;
2836 * Freelist is empty, give up.
2838 if (!agf->agf_flcount) {
2839 *bnop = NULLAGBLOCK;
2840 		return 0;
2841 	}
2843 * Read the array of free blocks.
2845 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2846 	if (error)
2847 		return error;
2851 * Get the block number and update the data structures.
2853 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2854 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2855 if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
2856 return -EFSCORRUPTED;
2858 be32_add_cpu(&agf->agf_flfirst, 1);
2859 xfs_trans_brelse(tp, agflbp);
2860 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2861 agf->agf_flfirst = 0;
2863 ASSERT(!xfs_perag_agfl_needs_reset(pag));
2864 be32_add_cpu(&agf->agf_flcount, -1);
2865 pag->pagf_flcount--;
2867 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2868 	if (btreeblk) {
2869 		be32_add_cpu(&agf->agf_btreeblks, 1);
2870 		pag->pagf_btreeblks++;
2871 		logflags |= XFS_AGF_BTREEBLKS;
2872 	}
2874 xfs_alloc_log_agf(tp, agbp, logflags);
2875 	*bnop = bno;
2877 	return 0;
2878 }
2881 * Log the given fields from the agf structure.
2883 void
2884 xfs_alloc_log_agf(
2885 struct xfs_trans *tp,
2886 	struct xfs_buf		*bp,
2887 	uint32_t		fields)
2889 int first; /* first byte offset */
2890 int last; /* last byte offset */
2891 static const short offsets[] = {
2892 offsetof(xfs_agf_t, agf_magicnum),
2893 offsetof(xfs_agf_t, agf_versionnum),
2894 offsetof(xfs_agf_t, agf_seqno),
2895 offsetof(xfs_agf_t, agf_length),
2896 offsetof(xfs_agf_t, agf_roots[0]),
2897 offsetof(xfs_agf_t, agf_levels[0]),
2898 offsetof(xfs_agf_t, agf_flfirst),
2899 offsetof(xfs_agf_t, agf_fllast),
2900 offsetof(xfs_agf_t, agf_flcount),
2901 offsetof(xfs_agf_t, agf_freeblks),
2902 offsetof(xfs_agf_t, agf_longest),
2903 offsetof(xfs_agf_t, agf_btreeblks),
2904 offsetof(xfs_agf_t, agf_uuid),
2905 offsetof(xfs_agf_t, agf_rmap_blocks),
2906 offsetof(xfs_agf_t, agf_refcount_blocks),
2907 offsetof(xfs_agf_t, agf_refcount_root),
2908 offsetof(xfs_agf_t, agf_refcount_level),
2909 /* needed so that we don't log the whole rest of the structure: */
2910 offsetof(xfs_agf_t, agf_spare64),
2911 		sizeof(xfs_agf_t)
2912 	};
2914 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
2916 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2918 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2919 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2923 * Put the block on the freelist for the allocation group.
2926 xfs_alloc_put_freelist(
2927 struct xfs_perag *pag,
2928 struct xfs_trans *tp,
2929 struct xfs_buf *agbp,
2930 struct xfs_buf *agflbp,
2931 	xfs_agblock_t		bno,
2932 	int			btreeblk)
2934 struct xfs_mount *mp = tp->t_mountp;
2935 struct xfs_agf *agf = agbp->b_addr;
2943 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2944 		if (error)
2945 			return error;
2948 be32_add_cpu(&agf->agf_fllast, 1);
2949 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2950 agf->agf_fllast = 0;
2952 ASSERT(!xfs_perag_agfl_needs_reset(pag));
2953 be32_add_cpu(&agf->agf_flcount, 1);
2954 pag->pagf_flcount++;
2956 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2957 	if (btreeblk) {
2958 		be32_add_cpu(&agf->agf_btreeblks, -1);
2959 		pag->pagf_btreeblks--;
2960 		logflags |= XFS_AGF_BTREEBLKS;
2961 	}
2963 xfs_alloc_log_agf(tp, agbp, logflags);
2965 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2967 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
2968 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2969 *blockp = cpu_to_be32(bno);
2970 startoff = (char *)blockp - (char *)agflbp->b_addr;
2972 xfs_alloc_log_agf(tp, agbp, logflags);
2974 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2975 xfs_trans_log_buf(tp, agflbp, startoff,
2976 startoff + sizeof(xfs_agblock_t) - 1);
2978 	return 0;
2979 }
2981 * Check that this AGF/AGI header's sequence number and length matches the AG
2982 * number and size in fsblocks.
2985 xfs_validate_ag_length(
2986 	struct xfs_buf		*bp,
2987 	uint32_t		seqno,
2988 	xfs_agblock_t		length)
2990 struct xfs_mount *mp = bp->b_mount;
2992 * During growfs operations, the perag is not fully initialised,
2993 * so we can't use it for any useful checking. growfs ensures we can't
2994 * use it by using uncached buffers that don't have the perag attached
2995 * so we can detect and avoid this problem.
2997 if (bp->b_pag && seqno != bp->b_pag->pag_agno)
2998 return __this_address;
3001 * Only the last AG in the filesystem is allowed to be shorter
3002 * than the AG size recorded in the superblock.
3004 if (length != mp->m_sb.sb_agblocks) {
3006 * During growfs, the new last AG can get here before we
3007 * have updated the superblock. Give it a pass on the seqno
3010 if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3011 return __this_address;
3012 if (length < XFS_MIN_AG_BLOCKS)
3013 return __this_address;
3014 if (length > mp->m_sb.sb_agblocks)
3015 return __this_address;
3016 	}
3018 	return NULL;
3019 }
3022 * Verify the AGF is consistent.
3024 * We do not verify the AGFL indexes in the AGF are fully consistent here
3025 * because of issues with variable on-disk structure sizes. Instead, we check
3026 * the agfl indexes for consistency when we initialise the perag from the AGF
3027 * information after a read completes.
3029 * If the index is inconsistent, then we mark the perag as needing an AGFL
3030 * reset. The first AGFL update performed then resets the AGFL indexes and
3031 * refills the AGFL with known good free blocks, allowing the filesystem to
3032 * continue operating normally at the cost of a few leaked free space blocks.
3034 static xfs_failaddr_t
3035 xfs_agf_verify(
3036 	struct xfs_buf		*bp)
3038 struct xfs_mount *mp = bp->b_mount;
3039 struct xfs_agf *agf = bp->b_addr;
3040 	xfs_failaddr_t		fa;
3041 uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
3042 uint32_t agf_length = be32_to_cpu(agf->agf_length);
3044 if (xfs_has_crc(mp)) {
3045 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3046 return __this_address;
3047 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3048 return __this_address;
3049 	}
3051 if (!xfs_verify_magic(bp, agf->agf_magicnum))
3052 return __this_address;
3054 if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3055 return __this_address;
3058 * Both agf_seqno and agf_length need to be validated before anything else
3059 * block number related in the AGF or AGFL can be checked.
3061 fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3062 	if (fa)
3063 		return fa;
3065 if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3066 return __this_address;
3067 if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3068 return __this_address;
3069 if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3070 return __this_address;
3072 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3073 be32_to_cpu(agf->agf_freeblks) > agf_length)
3074 return __this_address;
3076 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
3077 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
3078 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
3079 mp->m_alloc_maxlevels ||
3080 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
3081 mp->m_alloc_maxlevels)
3082 return __this_address;
3084 if (xfs_has_lazysbcount(mp) &&
3085 be32_to_cpu(agf->agf_btreeblks) > agf_length)
3086 return __this_address;
3088 if (xfs_has_rmapbt(mp)) {
3089 if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3090 return __this_address;
3092 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
3093 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
3094 mp->m_rmap_maxlevels)
3095 return __this_address;
3096 	}
3098 if (xfs_has_reflink(mp)) {
3099 if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3100 return __this_address;
3102 if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3103 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3104 return __this_address;
3105 	}
3107 	return NULL;
3108 }
3111 xfs_agf_read_verify(
3114 struct xfs_mount *mp = bp->b_mount;
3117 if (xfs_has_crc(mp) &&
3118 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3119 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3120 	else {
3121 fa = xfs_agf_verify(bp);
3122 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3123 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3124 	}
3125 }
3128 xfs_agf_write_verify(
3131 struct xfs_mount *mp = bp->b_mount;
3132 struct xfs_buf_log_item *bip = bp->b_log_item;
3133 struct xfs_agf *agf = bp->b_addr;
3136 fa = xfs_agf_verify(bp);
3137 	if (fa) {
3138 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3139 		return;
3140 	}
3142 if (!xfs_has_crc(mp))
3143 		return;
3145 	if (bip)
3146 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3148 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3151 const struct xfs_buf_ops xfs_agf_buf_ops = {
3153 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3154 .verify_read = xfs_agf_read_verify,
3155 .verify_write = xfs_agf_write_verify,
3156 .verify_struct = xfs_agf_verify,
3160 * Read in the allocation group header (free/alloc section).
3162 int
3163 xfs_read_agf(
3164 struct xfs_perag *pag,
3165 struct xfs_trans *tp,
3166 	int			flags,
3167 struct xfs_buf **agfbpp)
3169 struct xfs_mount *mp = pag->pag_mount;
3172 trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3174 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3175 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3176 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3177 	if (error)
3178 		return error;
3180 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3181 	return 0;
3182 }
3185 * Read in the allocation group header (free/alloc section) and initialise the
3186 * perag structure if necessary. If the caller provides @agfbpp, then return the
3187 * locked buffer to the caller, otherwise free it.
3191 struct xfs_perag *pag,
3192 struct xfs_trans *tp,
3193 	int			flags,
3194 struct xfs_buf **agfbpp)
3196 struct xfs_buf *agfbp;
3197 struct xfs_agf *agf;
3201 trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3203 /* We don't support trylock when freeing. */
3204 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3205 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3206 error = xfs_read_agf(pag, tp,
3207 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3208 			&agfbp);
3209 	if (error)
3210 		return error;
3212 agf = agfbp->b_addr;
3213 if (!xfs_perag_initialised_agf(pag)) {
3214 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3215 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3216 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3217 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3218 pag->pagf_levels[XFS_BTNUM_BNOi] =
3219 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3220 pag->pagf_levels[XFS_BTNUM_CNTi] =
3221 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3222 pag->pagf_levels[XFS_BTNUM_RMAPi] =
3223 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3224 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3225 if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3226 set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3228 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3231 * Update the in-core allocbt counter. Filter out the rmapbt
3232 * subset of the btreeblks counter because the rmapbt is managed
3233 * by perag reservation. Subtract one for the rmapbt root block
3234 * because the rmap counter includes it while the btreeblks
3235 * counter only tracks non-root blocks.
3237 allocbt_blks = pag->pagf_btreeblks;
3238 if (xfs_has_rmapbt(pag->pag_mount))
3239 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3240 if (allocbt_blks > 0)
3241 atomic64_add(allocbt_blks,
3242 &pag->pag_mount->m_allocbt_blks);
3244 set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3245 	}
3246 #ifdef DEBUG
3247 else if (!xfs_is_shutdown(pag->pag_mount)) {
3248 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3249 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3250 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3251 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3252 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3253 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3254 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3255 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3256 	}
3257 #endif
3258 	if (agfbpp)
3259 		*agfbpp = agfbp;
3260 	else
3261 xfs_trans_brelse(tp, agfbp);
3262 	return 0;
3263 }
3266 * Pre-process allocation arguments to set initial state that we don't require
3267 * callers to set up correctly, as well as bounds check the allocation args
3268 * that are set up.
3270 static int
3271 xfs_alloc_vextent_check_args(
3272 struct xfs_alloc_arg *args,
3273 xfs_fsblock_t target,
3274 xfs_agnumber_t *minimum_agno)
3276 struct xfs_mount *mp = args->mp;
3277 xfs_agblock_t agsize;
3279 args->fsbno = NULLFSBLOCK;
3281 	*minimum_agno = 0;
3282 if (args->tp->t_highest_agno != NULLAGNUMBER)
3283 *minimum_agno = args->tp->t_highest_agno;
3286 * Just fix this up, for the case where the last a.g. is shorter
3287 * (or there's only one a.g.) and the caller couldn't easily figure
3288 * that out (xfs_bmap_alloc).
3290 agsize = mp->m_sb.sb_agblocks;
3291 if (args->maxlen > agsize)
3292 args->maxlen = agsize;
3293 if (args->alignment == 0)
3294 args->alignment = 1;
3296 ASSERT(args->minlen > 0);
3297 ASSERT(args->maxlen > 0);
3298 ASSERT(args->alignment > 0);
3299 ASSERT(args->resv != XFS_AG_RESV_AGFL);
3301 ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3302 ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3303 ASSERT(args->minlen <= args->maxlen);
3304 ASSERT(args->minlen <= agsize);
3305 ASSERT(args->mod < args->prod);
3307 if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3308 XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3309 args->minlen > args->maxlen || args->minlen > agsize ||
3310 args->mod >= args->prod) {
3311 trace_xfs_alloc_vextent_badargs(args);
3312 		return -ENOSPC;
3313 	}
3315 if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3316 trace_xfs_alloc_vextent_skip_deadlock(args);
3317 		return -ENOSPC;
3318 	}
3320 	return 0;
3321 }
3324 * Prepare an AG for allocation. If the AG is not prepared to accept the
3325 * allocation, return failure.
3327 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3328 * modified to hold their own perag references.
3331 xfs_alloc_vextent_prepare_ag(
3332 struct xfs_alloc_arg *args,
3333 uint32_t alloc_flags)
3335 bool need_pag = !args->pag;
3336 	int			error;
3338 	if (need_pag)
3339 args->pag = xfs_perag_get(args->mp, args->agno);
3342 	error = xfs_alloc_fix_freelist(args, alloc_flags);
3343 	if (error) {
3344 		trace_xfs_alloc_vextent_nofix(args);
3345 		if (need_pag)
3346 			xfs_perag_put(args->pag);
3347 		args->agbno = NULLAGBLOCK;
3348 		return error;
3349 	}
3350 	if (!args->agbp) {
3351 		/* cannot allocate in this AG at all */
3352 		trace_xfs_alloc_vextent_noagbp(args);
3353 		args->agbno = NULLAGBLOCK;
3354 		return 0;
3355 	}
3356 	args->wasfromfl = 0;
3357 	return 0;
3358 }
3361 * Post-process allocation results to account for the allocation if it succeeded
3362 * and set the allocated block number correctly for the caller.
3364 * XXX: we should really be returning ENOSPC for ENOSPC, not
3365 * hiding it behind a "successful" NULLFSBLOCK allocation.
3368 xfs_alloc_vextent_finish(
3369 struct xfs_alloc_arg *args,
3370 xfs_agnumber_t minimum_agno,
3374 struct xfs_mount *mp = args->mp;
3378 * We can end up here with a locked AGF. If we failed, the caller is
3379 * likely going to try to allocate again with different parameters, and
3380 * that can widen the AGs that are searched for free space. If we have
3381 * to do BMBT block allocation, we have to do a new allocation.
3383 * Hence leaving this function with the AGF locked opens up potential
3384 * ABBA AGF deadlocks because a future allocation attempt in this
3385 * transaction may attempt to lock a lower number AGF.
3387 * We can't release the AGF until the transaction is committed, so at
3388 * this point we must update the "first allocation" tracker to point at
3389 * this AG if the tracker is empty or points to a lower AG. This allows
3390 * the next allocation attempt to be modified appropriately to avoid
3391 * deadlocks.
3393 	if (args->agbp &&
3394 (args->tp->t_highest_agno == NULLAGNUMBER ||
3395 args->agno > minimum_agno))
3396 args->tp->t_highest_agno = args->agno;
3399 * If the allocation failed with an error or we had an ENOSPC result,
3400 * preserve the returned error whilst also marking the allocation result
3401 * as "no extent allocated". This ensures that callers that fail to
3402 * capture the error will still treat it as a failed allocation.
3404 if (alloc_error || args->agbno == NULLAGBLOCK) {
3405 args->fsbno = NULLFSBLOCK;
3406 error = alloc_error;
3407 goto out_drop_perag;
3410 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3412 ASSERT(args->len >= args->minlen);
3413 ASSERT(args->len <= args->maxlen);
3414 ASSERT(args->agbno % args->alignment == 0);
3415 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3417 /* if not file data, insert new block into the reverse map btree */
3418 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3419 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3420 args->agbno, args->len, &args->oinfo);
3422 goto out_drop_perag;
3425 if (!args->wasfromfl) {
3426 error = xfs_alloc_update_counters(args->tp, args->agbp,
3427 -((long)(args->len)));
3429 goto out_drop_perag;
3431 ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3432 				args->len));
3433 	}
3435 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3437 XFS_STATS_INC(mp, xs_allocx);
3438 XFS_STATS_ADD(mp, xs_allocb, args->len);
3440 trace_xfs_alloc_vextent_finish(args);
3442 out_drop_perag:
3443 if (drop_perag && args->pag) {
3444 xfs_perag_rele(args->pag);
3445 		args->pag = NULL;
3446 	}
3447 	return error;
3448 }
3451 * Allocate within a single AG only. This uses a best-fit length algorithm so if
3452 * you need an exact sized allocation without locality constraints, this is the
3453 * fastest way to do it.
3455 * Caller is expected to hold a perag reference in args->pag.
3458 xfs_alloc_vextent_this_ag(
3459 struct xfs_alloc_arg *args,
3460 xfs_agnumber_t agno)
3462 struct xfs_mount *mp = args->mp;
3463 xfs_agnumber_t minimum_agno;
3464 uint32_t alloc_flags = 0;
3467 ASSERT(args->pag != NULL);
3468 ASSERT(args->pag->pag_agno == agno);
3470 	args->agno = agno;
3471 	args->agbno = 0;
3473 trace_xfs_alloc_vextent_this_ag(args);
3475 	error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3476 			&minimum_agno);
3477 	if (error) {
3478 		if (error == -ENOSPC)
3479 			return 0;
3480 		return error;
3481 	}
3483 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3484 if (!error && args->agbp)
3485 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3487 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3488 }
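/*
 * Editor's illustrative sketch, not part of the original source: minimal
 * argument setup for a fixed-AG, fixed-length allocation through
 * xfs_alloc_vextent_this_ag() above. The helper name is hypothetical and the
 * "any owner" owner info is an illustrative choice; as the comment above
 * requires, the caller must already hold a perag reference in args.pag.
 */
static inline int
example_alloc_in_ag(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		len,
	xfs_fsblock_t		*fsbno)
{
	struct xfs_alloc_arg	args = {
		.tp	= tp,
		.mp	= tp->t_mountp,
		.pag	= pag,
		.oinfo	= XFS_RMAP_OINFO_ANY_OWNER,
		.minlen	= len,
		.maxlen	= len,
		.prod	= 1,
		.resv	= XFS_AG_RESV_NONE,
	};
	int			error;

	error = xfs_alloc_vextent_this_ag(&args, pag->pag_agno);
	if (error)
		return error;
	*fsbno = args.fsbno;	/* NULLFSBLOCK when nothing was allocated */
	return 0;
}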
3491 * Iterate all AGs trying to allocate an extent starting from @start_ag.
3493 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3494 * allocation attempts in @start_agno have locality information. If we fail to
3495 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3496 * we attempt to allocate in as there is no locality optimisation possible for
3497 * those allocations.
3499 * On return, args->pag may be left referenced if we finish before the "all
3500 * failed" return point. The allocation finish still needs the perag, and
3501 * so the caller will release it once they've finished the allocation.
3503 * When we wrap the AG iteration at the end of the filesystem, we have to be
3504 * careful not to wrap into AGs below ones we already have locked in the
3505 * transaction if we are doing a blocking iteration. This will result in an
3506 * out-of-order locking of AGFs and hence can cause deadlocks.
3509 xfs_alloc_vextent_iterate_ags(
3510 struct xfs_alloc_arg *args,
3511 xfs_agnumber_t minimum_agno,
3512 xfs_agnumber_t start_agno,
3513 xfs_agblock_t target_agbno,
3514 uint32_t alloc_flags)
3516 struct xfs_mount *mp = args->mp;
3517 xfs_agnumber_t restart_agno = minimum_agno;
3518 xfs_agnumber_t agno;
3521 	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3522 		restart_agno = 0;
3523 restart:
3524 for_each_perag_wrap_range(mp, start_agno, restart_agno,
3525 mp->m_sb.sb_agcount, agno, args->pag) {
3526 		args->agno = agno;
3527 		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3528 		if (error)
3529 			break;
3530 		if (!args->agbp) {
3531 			trace_xfs_alloc_vextent_loopfailed(args);
3532 			continue;
3533 		}
3536 * Allocation is supposed to succeed now, so break out of the
3537 * loop regardless of whether we succeed or not.
3539 if (args->agno == start_agno && target_agbno) {
3540 args->agbno = target_agbno;
3541 			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3542 		} else {
3543 			args->agbno = 0;
3544 			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3545 		}
3546 		break;
3547 	}
3548 	if (error) {
3549 		xfs_perag_rele(args->pag);
3550 		args->pag = NULL;
3551 		return error;
3552 	}
3553 	if (args->agbp)
3554 		return 0;
3557 * We didn't find an AG we can allocate from. If we were given
3558 * constraining flags by the caller, drop them and retry the allocation
3559 * without any constraints being set.
3561 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3562 alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3563 restart_agno = minimum_agno;
3564 		goto restart;
3565 	}
3567 ASSERT(args->pag == NULL);
3568 trace_xfs_alloc_vextent_allfailed(args);
3573 * Iterate the AGs from the start AG to the end of the filesystem, trying
3574 * to allocate blocks. It starts with a near allocation attempt in the initial
3575 * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3576 * back to zero if allowed by previous allocations in this transaction,
3577 * otherwise will wrap back to the start AG and run a second blocking pass to
3578 * the end of the filesystem.
3581 xfs_alloc_vextent_start_ag(
3582 struct xfs_alloc_arg *args,
3583 xfs_fsblock_t target)
3585 struct xfs_mount *mp = args->mp;
3586 xfs_agnumber_t minimum_agno;
3587 xfs_agnumber_t start_agno;
3588 xfs_agnumber_t rotorstep = xfs_rotorstep;
3589 bool bump_rotor = false;
3590 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3593 ASSERT(args->pag == NULL);
3595 args->agno = NULLAGNUMBER;
3596 args->agbno = NULLAGBLOCK;
3598 trace_xfs_alloc_vextent_start_ag(args);
3600 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3601 	if (error) {
3602 		if (error == -ENOSPC)
3603 			return 0;
3604 		return error;
3605 	}
3607 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3608 xfs_is_inode32(mp)) {
3609 target = XFS_AGB_TO_FSB(mp,
3610 ((mp->m_agfrotor / rotorstep) %
3611 mp->m_sb.sb_agcount), 0);
3612 		bump_rotor = 1;
3613 	}
3615 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3616 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3617 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3619 	if (bump_rotor) {
3620 if (args->agno == start_agno)
3621 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3622 (mp->m_sb.sb_agcount * rotorstep);
3624 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3625 (mp->m_sb.sb_agcount * rotorstep);
3626 	}
3628 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3632 * Iterate from the agno indicated via @target through to the end of the
3633 * filesystem attempting blocking allocation. This does not wrap or try a second
3634 * pass, so will not recurse into AGs lower than indicated by the target.
3637 xfs_alloc_vextent_first_ag(
3638 struct xfs_alloc_arg *args,
3639 xfs_fsblock_t target)
3641 struct xfs_mount *mp = args->mp;
3642 xfs_agnumber_t minimum_agno;
3643 xfs_agnumber_t start_agno;
3644 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3647 ASSERT(args->pag == NULL);
3649 args->agno = NULLAGNUMBER;
3650 args->agbno = NULLAGBLOCK;
3652 trace_xfs_alloc_vextent_first_ag(args);
3654 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3655 	if (error) {
3656 		if (error == -ENOSPC)
3657 			return 0;
3658 		return error;
3659 	}
3661 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3662 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3663 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3664 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3668 * Allocate at the exact block target or fail. Caller is expected to hold a
3669 * perag reference in args->pag.
3672 xfs_alloc_vextent_exact_bno(
3673 struct xfs_alloc_arg *args,
3674 xfs_fsblock_t target)
3676 struct xfs_mount *mp = args->mp;
3677 xfs_agnumber_t minimum_agno;
3680 ASSERT(args->pag != NULL);
3681 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3683 args->agno = XFS_FSB_TO_AGNO(mp, target);
3684 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3686 trace_xfs_alloc_vextent_exact_bno(args);
3688 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3689 	if (error) {
3690 		if (error == -ENOSPC)
3691 			return 0;
3692 		return error;
3693 	}
3695 error = xfs_alloc_vextent_prepare_ag(args, 0);
3696 if (!error && args->agbp)
3697 error = xfs_alloc_ag_vextent_exact(args);
3699 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3703 * Allocate an extent as close to the target as possible. If there are no
3704 * viable candidates in the AG, then fail the allocation.
3706 * Caller may or may not have a per-ag reference in args->pag.
3709 xfs_alloc_vextent_near_bno(
3710 struct xfs_alloc_arg *args,
3711 xfs_fsblock_t target)
3713 struct xfs_mount *mp = args->mp;
3714 xfs_agnumber_t minimum_agno;
3715 bool needs_perag = args->pag == NULL;
3716 uint32_t alloc_flags = 0;
3719 	if (args->pag)
3720 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3722 args->agno = XFS_FSB_TO_AGNO(mp, target);
3723 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3725 trace_xfs_alloc_vextent_near_bno(args);
3727 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3728 	if (error) {
3729 		if (error == -ENOSPC)
3730 			return 0;
3731 		return error;
3732 	}
3734 	if (needs_perag)
3735 args->pag = xfs_perag_grab(mp, args->agno);
3737 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3738 if (!error && args->agbp)
3739 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3741 return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3744 /* Ensure that the freelist is at full capacity. */
3746 xfs_free_extent_fix_freelist(
3747 struct xfs_trans *tp,
3748 struct xfs_perag *pag,
3749 struct xfs_buf **agbp)
3751 struct xfs_alloc_arg args;
3754 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3755 	args.tp = tp;
3756 args.mp = tp->t_mountp;
3757 args.agno = pag->pag_agno;
3758 	args.pag = pag;
3761 * validate that the block number is legal - this enables us to detect
3762 * and handle a silent filesystem corruption rather than crashing.
3764 if (args.agno >= args.mp->m_sb.sb_agcount)
3765 return -EFSCORRUPTED;
3767 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3768 	if (error)
3769 		return error;
3771 	*agbp = args.agbp;
3772 	return 0;
3773 }
3777 * Just break up the extent address and hand off to xfs_free_ag_extent
3778 * after fixing up the freelist.
3780 int
3781 __xfs_free_extent(
3782 struct xfs_trans *tp,
3783 struct xfs_perag *pag,
3784 xfs_agblock_t agbno,
3785 	xfs_extlen_t			len,
3786 const struct xfs_owner_info *oinfo,
3787 enum xfs_ag_resv_type type,
3788 	bool				skip_discard)
3790 struct xfs_mount *mp = tp->t_mountp;
3791 struct xfs_buf *agbp;
3792 struct xfs_agf *agf;
3794 unsigned int busy_flags = 0;
3797 ASSERT(type != XFS_AG_RESV_AGFL);
3799 	if (XFS_TEST_ERROR(false, mp,
3800 			XFS_ERRTAG_FREE_EXTENT))
3801 		return -EIO;
3803 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3804 	if (error)
3805 		return error;
3807 	agf = agbp->b_addr;
3808 	if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3809 		error = -EFSCORRUPTED;
3810 		goto err_release;
3811 	}
3813 /* validate the extent size is legal now we have the agf locked */
3814 	if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3815 		error = -EFSCORRUPTED;
3816 		goto err_release;
3817 	}
3819 	error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
3820 			type);
3821 	if (error)
3822 		goto err_release;
3824 	if (skip_discard)
3825 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3826 	xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3827 	return 0;
3829 err_release:
3830 	xfs_trans_brelse(tp, agbp);
3831 	return error;
3832 }
3834 struct xfs_alloc_query_range_info {
3835 	xfs_alloc_query_range_fn	fn;
3836 	void				*priv;
3837 };
3839 /* Format btree record and pass to our callback. */
3841 xfs_alloc_query_range_helper(
3842 struct xfs_btree_cur *cur,
3843 const union xfs_btree_rec *rec,
3844 	void				*priv)
3846 struct xfs_alloc_query_range_info *query = priv;
3847 struct xfs_alloc_rec_incore irec;
3850 xfs_alloc_btrec_to_irec(rec, &irec);
3851 fa = xfs_alloc_check_irec(cur, &irec);
3852 	if (fa)
3853 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
3855 return query->fn(cur, &irec, query->priv);
3858 /* Find all free space within a given range of blocks. */
3860 xfs_alloc_query_range(
3861 struct xfs_btree_cur *cur,
3862 const struct xfs_alloc_rec_incore *low_rec,
3863 const struct xfs_alloc_rec_incore *high_rec,
3864 xfs_alloc_query_range_fn fn,
3865 	void				*priv)
3867 union xfs_btree_irec low_brec = { .a = *low_rec };
3868 union xfs_btree_irec high_brec = { .a = *high_rec };
3869 struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
3871 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3872 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3873 			xfs_alloc_query_range_helper, &query);
3874 }
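/*
 * Editor's illustrative sketch, not part of the original source: counting
 * free space records in a block range with xfs_alloc_query_range() above.
 * The callback and helper names are hypothetical; the cursor must be a
 * by-bno cursor as the ASSERT above requires.
 */
static int
example_count_free_rec(
	struct xfs_btree_cur			*cur,
	const struct xfs_alloc_rec_incore	*rec,
	void					*priv)
{
	unsigned int				*count = priv;

	(*count)++;		/* one btree record per free extent */
	return 0;
}

static inline int
example_count_free_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		start,
	xfs_agblock_t		end,
	unsigned int		*count)
{
	struct xfs_alloc_rec_incore	low = { .ar_startblock = start };
	struct xfs_alloc_rec_incore	high = { .ar_startblock = end };

	*count = 0;
	return xfs_alloc_query_range(cur, &low, &high,
			example_count_free_rec, count);
}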
3876 /* Find all free space records. */
3878 xfs_alloc_query_all(
3879 struct xfs_btree_cur *cur,
3880 xfs_alloc_query_range_fn fn,
3881 	void			*priv)
3883 struct xfs_alloc_query_range_info query;
3885 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3886 	query.priv = priv;
3887 	query.fn = fn;
3888 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3889 }
3892 * Scan part of the keyspace of the free space and tell us if the area has no
3893 * records, is fully mapped by records, or is partially filled.
3896 xfs_alloc_has_records(
3897 struct xfs_btree_cur *cur,
3898 	xfs_agblock_t		bno,
3899 	xfs_extlen_t		len,
3900 enum xbtree_recpacking *outcome)
3902 union xfs_btree_irec low;
3903 union xfs_btree_irec high;
3905 memset(&low, 0, sizeof(low));
3906 low.a.ar_startblock = bno;
3907 memset(&high, 0xFF, sizeof(high));
3908 high.a.ar_startblock = bno + len - 1;
3910 	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
3911 }
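/*
 * Editor's illustrative sketch, not part of the original source: asking
 * whether a candidate region is entirely free before reusing it, via
 * xfs_alloc_has_records() above. The wrapper name is hypothetical; the
 * cursor is assumed to be a by-bno free space btree cursor.
 */
static inline int
example_region_is_all_free(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*all_free)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_alloc_has_records(cur, bno, len, &outcome);
	if (error)
		return error;
	/* FULL means free space records cover the whole keyspace queried. */
	*all_free = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}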
3914 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
3915 * error code or XFS_ITER_*.
3917 int
3918 xfs_agfl_walk(
3919 struct xfs_mount *mp,
3920 struct xfs_agf *agf,
3921 struct xfs_buf *agflbp,
3922 xfs_agfl_walk_fn walk_fn,
3923 	void			*priv)
3929 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3930 i = be32_to_cpu(agf->agf_flfirst);
3932 /* Nothing to walk in an empty AGFL. */
3933 if (agf->agf_flcount == cpu_to_be32(0))
3934 		return 0;
3936 /* Otherwise, walk from first to last, wrapping as needed. */
3937 	for (;;) {
3938 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3939 		if (error)
3940 			return error;
3941 if (i == be32_to_cpu(agf->agf_fllast))
3942 			break;
3943 		if (++i == xfs_agfl_size(mp))
3944 			i = 0;
3945 	}
3947 	return 0;
3948 }
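/*
 * Editor's illustrative sketch, not part of the original source: a trivial
 * walk_fn that counts AGFL slots, demonstrating the first-to-last wrapping
 * walk implemented above. Names are hypothetical.
 *
 * Usage: error = xfs_agfl_walk(mp, agf, agflbp, example_count_agfl_block,
 *                              &count);
 */
static int
example_count_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	unsigned int		*count = priv;

	(*count)++;
	return 0;		/* keep walking */
}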
3951 xfs_extfree_intent_init_cache(void)
3953 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
3954 sizeof(struct xfs_extent_free_item),
3955 			0, 0, NULL);
3957 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
3958 }
3960 void
3961 xfs_extfree_intent_destroy_cache(void)
3963 kmem_cache_destroy(xfs_extfree_item_cache);
3964 xfs_extfree_item_cache = NULL;