1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_ialloc.h"
17 #include "xfs_ialloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_errortag.h"
20 #include "xfs_error.h"
22 #include "xfs_trans.h"
23 #include "xfs_buf_item.h"
24 #include "xfs_icreate_item.h"
25 #include "xfs_icache.h"
26 #include "xfs_trace.h"
32 * Lookup a record by ino in the btree given by cur.
36 struct xfs_btree_cur *cur, /* btree cursor */
37 xfs_agino_t ino, /* starting inode of chunk */
38 xfs_lookup_t dir, /* <=, >=, == */
39 int *stat) /* success/failure */
41 cur->bc_rec.i.ir_startino = ino;
42 cur->bc_rec.i.ir_holemask = 0;
43 cur->bc_rec.i.ir_count = 0;
44 cur->bc_rec.i.ir_freecount = 0;
45 cur->bc_rec.i.ir_free = 0;
46 return xfs_btree_lookup(cur, dir, stat);
50 * Update the record referred to by cur to the value given.
51 * This either works (return 0) or gets an EFSCORRUPTED error.
53 STATIC int /* error */
55 struct xfs_btree_cur *cur, /* btree cursor */
56 xfs_inobt_rec_incore_t *irec) /* btree record */
58 union xfs_btree_rec rec;
60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
61 if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
63 rec.inobt.ir_u.sp.ir_count = irec->ir_count;
64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
66 /* ir_holemask/ir_count not supported on-disk */
67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
70 return xfs_btree_update(cur, &rec);
73 /* Convert on-disk btree record to incore inobt record. */
75 xfs_inobt_btrec_to_irec(
77 union xfs_btree_rec *rec,
78 struct xfs_inobt_rec_incore *irec)
80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
81 if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
83 irec->ir_count = rec->inobt.ir_u.sp.ir_count;
84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
88 * values for full inode chunks.
90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
91 irec->ir_count = XFS_INODES_PER_CHUNK;
93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
99 * Get the data from the pointed-to record.
103 struct xfs_btree_cur *cur,
104 struct xfs_inobt_rec_incore *irec,
107 struct xfs_mount *mp = cur->bc_mp;
108 xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;
109 union xfs_btree_rec *rec;
113 error = xfs_btree_get_rec(cur, &rec, stat);
114 if (error || *stat == 0)
117 xfs_inobt_btrec_to_irec(mp, rec, irec);
119 if (!xfs_verify_agino(mp, agno, irec->ir_startino))
121 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
122 irec->ir_count > XFS_INODES_PER_CHUNK)
124 if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
127 /* ir_freecount must match the number of free inodes in allocated regions */
128 if (!xfs_inobt_issparse(irec->ir_holemask))
129 realfree = irec->ir_free;
131 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
132 if (hweight64(realfree) != irec->ir_freecount)
139 "%s Inode BTree record corruption in AG %d detected!",
140 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
142 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
143 irec->ir_startino, irec->ir_count, irec->ir_freecount,
144 irec->ir_free, irec->ir_holemask);
145 return -EFSCORRUPTED;
149 * Insert a single inobt record. Cursor must already point to desired location.
152 xfs_inobt_insert_rec(
153 struct xfs_btree_cur *cur,
160 cur->bc_rec.i.ir_holemask = holemask;
161 cur->bc_rec.i.ir_count = count;
162 cur->bc_rec.i.ir_freecount = freecount;
163 cur->bc_rec.i.ir_free = free;
164 return xfs_btree_insert(cur, stat);
168 * Insert records describing a newly allocated inode chunk into the inobt.
172 struct xfs_mount *mp,
173 struct xfs_trans *tp,
174 struct xfs_buf *agbp,
175 struct xfs_perag *pag,
180 struct xfs_btree_cur *cur;
185 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
187 for (thisino = newino;
188 thisino < newino + newlen;
189 thisino += XFS_INODES_PER_CHUNK) {
190 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
192 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
197 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
198 XFS_INODES_PER_CHUNK,
199 XFS_INODES_PER_CHUNK,
200 XFS_INOBT_ALL_FREE, &i);
202 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
208 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
214 * Verify that the number of free inodes in the AGI is correct.
218 xfs_check_agi_freecount(
219 struct xfs_btree_cur *cur)
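/*
 * Walk all inobt records and sum their free counts, but only when the
 * btree has a single level.
 */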
221 if (cur->bc_nlevels == 1) {
222 xfs_inobt_rec_incore_t rec;
227 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
232 error = xfs_inobt_get_rec(cur, &rec, &i);
237 freecount += rec.ir_freecount;
238 error = xfs_btree_increment(cur, 0, &i);
244 if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
245 ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
250 #define xfs_check_agi_freecount(cur) 0
254 * Initialise a new set of inodes. When called without a transaction context
255 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
256 * than logging them (which in a transaction context puts them into the AIL
257 * for writeback rather than the xfsbufd queue).
260 xfs_ialloc_inode_init(
261 struct xfs_mount *mp,
262 struct xfs_trans *tp,
263 struct list_head *buffer_list,
267 xfs_agblock_t length,
270 struct xfs_buf *fbuf;
271 struct xfs_dinode *free;
280 * Loop over the new block(s), filling in the inodes. For small block
281 * sizes, manipulate the inodes in buffers which are multiples of the
284 nbufs = length / M_IGEO(mp)->blocks_per_cluster;
287 * Figure out what version number to use in the inodes we create. If
288 * the superblock version has caught up to the one that supports the new
289 * inode format, then use the new inode version. Otherwise use the old
290 * version so that old kernels will continue to be able to use the file
293 * For v3 inodes, we also need to write the inode number into the inode,
294 * so calculate the first inode number of the chunk here as
295 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
296 * across multiple filesystem blocks (such as a cluster) and so cannot
297 * be used in the cluster buffer loop below.
299 * Further, because we are writing the inode directly into the buffer
300 * and calculating a CRC on the entire inode, we have to log the entire
301 * inode so that the entire range the CRC covers is present in the log.
302 * That means for v3 inodes we log the entire buffer rather than just the
305 if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
307 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
310 * log the initialisation that is about to take place as a
311 * logical operation. This means the transaction does not
312 * need to log the physical changes to the inode buffers as log
313 * recovery will know what initialisation is actually needed.
314 * Hence we only need to log the buffers as "ordered" buffers so
315 * they track in the AIL as if they were physically logged.
318 xfs_icreate_log(tp, agno, agbno, icount,
319 mp->m_sb.sb_inodesize, length, gen);
323 for (j = 0; j < nbufs; j++) {
327 d = XFS_AGB_TO_DADDR(mp, agno, agbno +
328 (j * M_IGEO(mp)->blocks_per_cluster));
329 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
330 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
331 XBF_UNMAPPED, &fbuf);
335 /* Initialize the inode buffers and log them appropriately. */
336 fbuf->b_ops = &xfs_inode_buf_ops;
337 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
338 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
339 int ioffset = i << mp->m_sb.sb_inodelog;
340 uint isize = XFS_DINODE_SIZE(&mp->m_sb);
342 free = xfs_make_iptr(mp, fbuf, i);
343 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
344 free->di_version = version;
345 free->di_gen = cpu_to_be32(gen);
346 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
349 free->di_ino = cpu_to_be64(ino);
351 uuid_copy(&free->di_uuid,
352 &mp->m_sb.sb_meta_uuid);
353 xfs_dinode_calc_crc(mp, free);
355 /* just log the inode core */
356 xfs_trans_log_buf(tp, fbuf, ioffset,
357 ioffset + isize - 1);
363 * Mark the buffer as an inode allocation buffer so it
364 * sticks in the AIL at the point of this allocation
365 * transaction. This ensures that they are on disk before
366 * the tail of the log can be moved past this
367 * transaction (i.e. by preventing relogging from moving
368 * it forward in the log).
370 xfs_trans_inode_alloc_buf(tp, fbuf);
373 * Mark the buffer as ordered so that they are
374 * not physically logged in the transaction but
375 * still tracked in the AIL as part of the
376 * transaction and pin the log appropriately.
378 xfs_trans_ordered_buf(tp, fbuf);
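/*
 * No transaction context (e.g. log recovery): mark the buffer
 * contents valid and queue it for delayed write rather than
 * logging it.
 */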
381 fbuf->b_flags |= XBF_DONE;
382 xfs_buf_delwri_queue(fbuf, buffer_list);
390 * Align startino and allocmask for a recently allocated sparse chunk such that
391 * they are fit for insertion (or merge) into the on-disk inode btrees.
395 * When enabled, sparse inode support increases the inode alignment from cluster
396 * size to inode chunk size. This means that the minimum range between two
397 * non-adjacent inode records in the inobt is large enough for a full inode
398 * record. This allows for cluster sized, cluster aligned block allocation
399 * without need to worry about whether the resulting inode record overlaps with
400 * another record in the tree. Without this basic rule, we would have to deal
401 * with the consequences of overlap by potentially undoing recent allocations in
402 * the inode allocation codepath.
404 * Because of this alignment rule (which is enforced on mount), there are two
405 * inobt possibilities for newly allocated sparse chunks. One is that the
406 * aligned inode record for the chunk covers a range of inodes not already
407 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
408 * other is that a record already exists at the aligned startino that considers
409 * the newly allocated range as sparse. In the latter case, record content is
410 * merged in hope that sparse inode chunks fill to full chunks over time.
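 *
 * As a worked example (assuming 64 inodes per chunk and a 16-bit holemask,
 * so each allocmask bit covers 4 inodes): a 32 inode sparse allocation that
 * starts 32 inodes into an aligned chunk has its startino rounded down by
 * 32 and its allocmask of 0x00ff shifted left by 32 / 4 = 8 bits to 0xff00,
 * so the shifted mask still describes the same physical inodes.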
413 xfs_align_sparse_ino(
414 struct xfs_mount *mp,
415 xfs_agino_t *startino,
422 agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
423 mod = agbno % mp->m_sb.sb_inoalignmt;
427 /* calculate the inode offset and align startino */
428 offset = XFS_AGB_TO_AGINO(mp, mod);
432 * Since startino has been aligned down, left shift allocmask such that
433 * it continues to represent the same physical inodes relative to the
436 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
440 * Determine whether the source inode record can merge into the target. Both
441 * records must be sparse, the inode ranges must match and there must be no
442 * allocation overlap between the records.
445 __xfs_inobt_can_merge(
446 struct xfs_inobt_rec_incore *trec, /* tgt record */
447 struct xfs_inobt_rec_incore *srec) /* src record */
452 /* records must cover the same inode range */
453 if (trec->ir_startino != srec->ir_startino)
456 /* both records must be sparse */
457 if (!xfs_inobt_issparse(trec->ir_holemask) ||
458 !xfs_inobt_issparse(srec->ir_holemask))
461 /* both records must track some inodes */
462 if (!trec->ir_count || !srec->ir_count)
465 /* can't exceed capacity of a full record */
466 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
469 /* verify there is no allocation overlap */
470 talloc = xfs_inobt_irec_to_allocmask(trec);
471 salloc = xfs_inobt_irec_to_allocmask(srec);
479 * Merge the source inode record into the target. The caller must call
480 * __xfs_inobt_can_merge() to ensure the merge is valid.
483 __xfs_inobt_rec_merge(
484 struct xfs_inobt_rec_incore *trec, /* target */
485 struct xfs_inobt_rec_incore *srec) /* src */
487 ASSERT(trec->ir_startino == srec->ir_startino);
489 /* combine the counts */
490 trec->ir_count += srec->ir_count;
491 trec->ir_freecount += srec->ir_freecount;
494 * Merge the holemask and free mask. For both fields, 0 bits refer to
495 * allocated inodes. We combine the allocated ranges with bitwise AND.
497 trec->ir_holemask &= srec->ir_holemask;
498 trec->ir_free &= srec->ir_free;
502 * Insert a new sparse inode chunk into the associated inode btree. The inode
503 * record for the sparse chunk is pre-aligned to a startino that should match
504 * any pre-existing sparse inode record in the tree. This allows sparse chunks
507 * This function supports two modes of handling preexisting records depending on
508 * the merge flag. If merge is true, the provided record is merged with the
509 * existing record and updated in place. The merged record is returned in nrec.
510 * If merge is false, an existing record is replaced with the provided record.
511 * If no preexisting record exists, the provided record is always inserted.
513 * It is considered corruption if a merge is requested and not possible. Given
514 * the sparse inode alignment constraints, this should never happen.
517 xfs_inobt_insert_sprec(
518 struct xfs_mount *mp,
519 struct xfs_trans *tp,
520 struct xfs_buf *agbp,
521 struct xfs_perag *pag,
523 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
524 bool merge) /* merge or replace */
526 struct xfs_btree_cur *cur;
529 struct xfs_inobt_rec_incore rec;
531 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
533 /* the new record is pre-aligned so we know where to look */
534 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
537 /* if nothing there, insert a new record and return */
539 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
540 nrec->ir_count, nrec->ir_freecount,
544 if (XFS_IS_CORRUPT(mp, i != 1)) {
545 error = -EFSCORRUPTED;
553 * A record exists at this startino. Merge or replace the record
554 * depending on what we've been asked to do.
557 error = xfs_inobt_get_rec(cur, &rec, &i);
560 if (XFS_IS_CORRUPT(mp, i != 1)) {
561 error = -EFSCORRUPTED;
564 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
565 error = -EFSCORRUPTED;
570 * This should never fail. If we have coexisting records that
571 * cannot merge, something is seriously wrong.
573 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
574 error = -EFSCORRUPTED;
578 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
579 rec.ir_holemask, nrec->ir_startino,
582 /* merge to nrec to output the updated record */
583 __xfs_inobt_rec_merge(nrec, &rec);
585 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
588 error = xfs_inobt_rec_check_count(mp, nrec);
593 error = xfs_inobt_update(cur, nrec);
598 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
601 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
606 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if
607 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so
608 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum
609 * inode count threshold, or the usual negative error code for other errors.
613 struct xfs_trans *tp,
614 struct xfs_buf *agbp,
615 struct xfs_perag *pag)
618 struct xfs_alloc_arg args;
620 xfs_agino_t newino; /* new first inode's number */
621 xfs_agino_t newlen; /* new number of inodes */
622 int isaligned = 0; /* inode allocation at stripe */
624 /* init. to full chunk */
625 struct xfs_inobt_rec_incore rec;
626 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
627 uint16_t allocmask = (uint16_t) -1;
630 memset(&args, 0, sizeof(args));
632 args.mp = tp->t_mountp;
633 args.fsbno = NULLFSBLOCK;
634 args.oinfo = XFS_RMAP_OINFO_INODES;
637 /* randomly do sparse inode allocations */
638 if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
639 igeo->ialloc_min_blks < igeo->ialloc_blks)
640 do_sparse = prandom_u32() & 1;
644 * Locking will ensure that we don't have two callers in here
647 newlen = igeo->ialloc_inos;
648 if (igeo->maxicount &&
649 percpu_counter_read_positive(&args.mp->m_icount) + newlen >
652 args.minlen = args.maxlen = igeo->ialloc_blks;
654 * First try to allocate inodes contiguous with the last-allocated
655 * chunk of inodes. If the filesystem is striped, this will fill
656 * an entire stripe unit with inodes.
659 newino = be32_to_cpu(agi->agi_newino);
660 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
664 if (likely(newino != NULLAGINO &&
665 (args.agbno < be32_to_cpu(agi->agi_length)))) {
666 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
667 args.type = XFS_ALLOCTYPE_THIS_BNO;
671 * We need to take into account alignment here to ensure that
672 * we don't modify the free list if we fail to have an exact
673 * block. If we don't have an exact match, and every other
674 * allocation attempt fails, we'll end up cancelling
675 * a dirty transaction and shutting down.
677 * For an exact allocation, alignment must be 1,
678 * however we need to take cluster alignment into account when
679 * fixing up the freelist. Use the minalignslop field to
680 * indicate that extra blocks might be required for alignment,
681 * but not to use them in the actual exact allocation.
684 args.minalignslop = igeo->cluster_align - 1;
686 /* Allow space for the inode btree to split. */
687 args.minleft = igeo->inobt_maxlevels;
688 if ((error = xfs_alloc_vextent(&args)))
692 * This request might have dirtied the transaction if the AG can
693 * satisfy the request, but the exact block was not available.
694 * If the allocation did fail, subsequent requests will relax
695 * the exact agbno requirement and increase the alignment
696 * instead. It is critical that the total size of the request
697 * (len + alignment + slop) does not increase from this point
698 * on, so reset minalignslop to ensure it is not included in
699 * subsequent requests.
701 args.minalignslop = 0;
704 if (unlikely(args.fsbno == NULLFSBLOCK)) {
706 * Set the alignment for the allocation.
707 * If stripe alignment is turned on then align at stripe unit
709 * If the cluster size is smaller than a filesystem block
710 * then we're doing I/O for inodes in filesystem block size
711 * pieces, so don't need alignment anyway.
714 if (igeo->ialloc_align) {
715 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
716 args.alignment = args.mp->m_dalign;
719 args.alignment = igeo->cluster_align;
721 * Need to figure out where to allocate the inode blocks.
722 * Ideally they should be spaced out through the a.g.
723 * For now, just allocate blocks up front.
725 args.agbno = be32_to_cpu(agi->agi_root);
726 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
728 * Allocate a fixed-size extent of inodes.
730 args.type = XFS_ALLOCTYPE_NEAR_BNO;
733 * Allow space for the inode btree to split.
735 args.minleft = igeo->inobt_maxlevels;
736 if ((error = xfs_alloc_vextent(&args)))
741 * If stripe alignment is turned on, then try again with cluster
744 if (isaligned && args.fsbno == NULLFSBLOCK) {
745 args.type = XFS_ALLOCTYPE_NEAR_BNO;
746 args.agbno = be32_to_cpu(agi->agi_root);
747 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
748 args.alignment = igeo->cluster_align;
749 if ((error = xfs_alloc_vextent(&args)))
754 * Finally, try a sparse allocation if the filesystem supports it and
755 * the sparse allocation length is smaller than a full chunk.
757 if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
758 igeo->ialloc_min_blks < igeo->ialloc_blks &&
759 args.fsbno == NULLFSBLOCK) {
761 args.type = XFS_ALLOCTYPE_NEAR_BNO;
762 args.agbno = be32_to_cpu(agi->agi_root);
763 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
764 args.alignment = args.mp->m_sb.sb_spino_align;
767 args.minlen = igeo->ialloc_min_blks;
768 args.maxlen = args.minlen;
771 * The inode record will be aligned to full chunk size. We must
772 * prevent sparse allocation from AG boundaries that result in
773 * invalid inode records, such as records that start at agbno 0
774 * or extend beyond the AG.
776 * Set min agbno to the first aligned, non-zero agbno and max to
777 * the last aligned agbno that is at least one full chunk from
780 args.min_agbno = args.mp->m_sb.sb_inoalignmt;
781 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
782 args.mp->m_sb.sb_inoalignmt) -
785 error = xfs_alloc_vextent(&args);
789 newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
790 ASSERT(newlen <= XFS_INODES_PER_CHUNK);
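/*
 * Build an allocmask with one bit set per XFS_INODES_PER_HOLEMASK_BIT
 * inodes actually allocated; e.g. a 32 inode allocation sets the low
 * eight bits (0x00ff).
 */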
791 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
794 if (args.fsbno == NULLFSBLOCK)
797 ASSERT(args.len == args.minlen);
800 * Stamp and write the inode buffers.
802 * Seed the new inode cluster with a random generation number. This
803 * prevents short-term reuse of generation numbers if a chunk is
804 * freed and then immediately reallocated. We use random numbers
805 * rather than a linear progression to prevent the next generation
806 * number from being easily guessable.
808 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
809 args.agbno, args.len, prandom_u32());
814 * Convert the results.
816 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
818 if (xfs_inobt_issparse(~allocmask)) {
820 * We've allocated a sparse chunk. Align the startino and mask.
822 xfs_align_sparse_ino(args.mp, &newino, &allocmask);
824 rec.ir_startino = newino;
825 rec.ir_holemask = ~allocmask;
826 rec.ir_count = newlen;
827 rec.ir_freecount = newlen;
828 rec.ir_free = XFS_INOBT_ALL_FREE;
831 * Insert the sparse record into the inobt and allow for a merge
832 * if necessary. If a merge does occur, rec is updated to the
835 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
836 XFS_BTNUM_INO, &rec, true);
837 if (error == -EFSCORRUPTED) {
839 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
840 XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
842 rec.ir_holemask, rec.ir_count);
843 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
849 * Due to finobt semantics, we can't merge the part we've just allocated
850 * into the finobt the way we did for the inobt. The original record may or may not
851 * exist independent of whether physical inodes exist in this
854 * We must update the finobt record based on the inobt record.
855 * rec contains the fully merged and up to date inobt record
856 * from the previous call. Set merge false to replace any
857 * existing record with this one.
859 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
860 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
861 XFS_BTNUM_FINO, &rec, false);
866 /* full chunk - insert new records to both btrees */
867 error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino, newlen,
872 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
873 error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino,
874 newlen, XFS_BTNUM_FINO);
881 * Update AGI counts and newino.
883 be32_add_cpu(&agi->agi_count, newlen);
884 be32_add_cpu(&agi->agi_freecount, newlen);
885 pag->pagi_freecount += newlen;
886 pag->pagi_count += newlen;
887 agi->agi_newino = cpu_to_be32(newino);
890 * Log allocation group header fields
892 xfs_ialloc_log_agi(tp, agbp,
893 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
895 * Modify/log superblock values for inode count and inode free count.
897 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
898 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
903 * Try to retrieve the next record to the left/right from the current one.
907 struct xfs_btree_cur *cur,
908 xfs_inobt_rec_incore_t *rec,
916 error = xfs_btree_decrement(cur, 0, &i);
918 error = xfs_btree_increment(cur, 0, &i);
924 error = xfs_inobt_get_rec(cur, rec, &i);
927 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
928 return -EFSCORRUPTED;
936 struct xfs_btree_cur *cur,
938 xfs_inobt_rec_incore_t *rec,
944 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
949 error = xfs_inobt_get_rec(cur, rec, &i);
952 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
953 return -EFSCORRUPTED;
960 * Return the offset of the first free inode in the record. If the inode chunk
961 * is sparsely allocated, we convert the record holemask to inode granularity
962 * and mask off the unallocated regions from the inode free mask.
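 *
 * For example, a record with holemask 0xff00 has inodes 32-63 unallocated
 * (each holemask bit covers four inodes), so only the low 32 bits of
 * ir_free are considered when picking the first free inode.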
965 xfs_inobt_first_free_inode(
966 struct xfs_inobt_rec_incore *rec)
968 xfs_inofree_t realfree;
970 /* if there are no holes, return the first available offset */
971 if (!xfs_inobt_issparse(rec->ir_holemask))
972 return xfs_lowbit64(rec->ir_free);
974 realfree = xfs_inobt_irec_to_allocmask(rec);
975 realfree &= rec->ir_free;
977 return xfs_lowbit64(realfree);
981 * Allocate an inode using the inobt-only algorithm.
984 xfs_dialloc_ag_inobt(
985 struct xfs_trans *tp,
986 struct xfs_buf *agbp,
987 struct xfs_perag *pag,
991 struct xfs_mount *mp = tp->t_mountp;
992 struct xfs_agi *agi = agbp->b_addr;
993 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
994 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
995 struct xfs_btree_cur *cur, *tcur;
996 struct xfs_inobt_rec_incore rec, trec;
1001 int searchdistance = 10;
1003 ASSERT(pag->pagi_init);
1004 ASSERT(pag->pagi_inodeok);
1005 ASSERT(pag->pagi_freecount > 0);
1008 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
1010 * If pagino is 0 (this is the root inode allocation) use newino.
1011 * This must work because we've just allocated some.
1014 pagino = be32_to_cpu(agi->agi_newino);
1016 error = xfs_check_agi_freecount(cur);
1021 * If in the same AG as the parent, try to get near the parent.
1023 if (pagno == pag->pag_agno) {
1024 int doneleft; /* done, to the left */
1025 int doneright; /* done, to the right */
1027 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
1030 if (XFS_IS_CORRUPT(mp, i != 1)) {
1031 error = -EFSCORRUPTED;
1035 error = xfs_inobt_get_rec(cur, &rec, &j);
1038 if (XFS_IS_CORRUPT(mp, j != 1)) {
1039 error = -EFSCORRUPTED;
1043 if (rec.ir_freecount > 0) {
1045 * Found a free inode in the same chunk
1046 * as the parent, done.
1053 * In the same AG as parent, but parent's chunk is full.
1056 /* duplicate the cursor, search left & right simultaneously */
1057 error = xfs_btree_dup_cursor(cur, &tcur);
1062 * Skip to last blocks looked up if same parent inode.
1064 if (pagino != NULLAGINO &&
1065 pag->pagl_pagino == pagino &&
1066 pag->pagl_leftrec != NULLAGINO &&
1067 pag->pagl_rightrec != NULLAGINO) {
1068 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
1073 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
1078 /* search left with tcur, back up 1 record */
1079 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
1083 /* search right with cur, go forward 1 record. */
1084 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
1090 * Loop until we find an inode chunk with a free inode.
1092 while (--searchdistance > 0 && (!doneleft || !doneright)) {
1093 int useleft; /* using left inode chunk this time */
1095 /* figure out the closer block if both are valid. */
1096 if (!doneleft && !doneright) {
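/*
 * Use the left record if the last inode of the left chunk is
 * closer to the parent than the first inode of the right chunk.
 */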
1098 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
1099 rec.ir_startino - pagino;
1101 useleft = !doneleft;
1104 /* free inodes to the left? */
1105 if (useleft && trec.ir_freecount) {
1106 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1109 pag->pagl_leftrec = trec.ir_startino;
1110 pag->pagl_rightrec = rec.ir_startino;
1111 pag->pagl_pagino = pagino;
1116 /* free inodes to the right? */
1117 if (!useleft && rec.ir_freecount) {
1118 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1120 pag->pagl_leftrec = trec.ir_startino;
1121 pag->pagl_rightrec = rec.ir_startino;
1122 pag->pagl_pagino = pagino;
1126 /* get next record to check */
1128 error = xfs_ialloc_next_rec(tcur, &trec,
1131 error = xfs_ialloc_next_rec(cur, &rec,
1138 if (searchdistance <= 0) {
1140 * Not in range - save last search
1141 * location and allocate a new inode
1143 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1144 pag->pagl_leftrec = trec.ir_startino;
1145 pag->pagl_rightrec = rec.ir_startino;
1146 pag->pagl_pagino = pagino;
1150 * We've reached the end of the btree. Because
1151 * we are only searching a small chunk of the
1152 * btree each search, there are obviously free
1153 * inodes closer to the parent inode than we
1154 * are now. Restart the search again.
1156 pag->pagl_pagino = NULLAGINO;
1157 pag->pagl_leftrec = NULLAGINO;
1158 pag->pagl_rightrec = NULLAGINO;
1159 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
1160 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1166 * In a different AG from the parent.
1167 * See if the most recently allocated block has any free.
1169 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1170 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1176 error = xfs_inobt_get_rec(cur, &rec, &j);
1180 if (j == 1 && rec.ir_freecount > 0) {
1182 * The last chunk allocated in the group
1183 * still has a free inode.
1191 * None left in the last group, search the whole AG
1193 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1196 if (XFS_IS_CORRUPT(mp, i != 1)) {
1197 error = -EFSCORRUPTED;
1202 error = xfs_inobt_get_rec(cur, &rec, &i);
1205 if (XFS_IS_CORRUPT(mp, i != 1)) {
1206 error = -EFSCORRUPTED;
1209 if (rec.ir_freecount > 0)
1211 error = xfs_btree_increment(cur, 0, &i);
1214 if (XFS_IS_CORRUPT(mp, i != 1)) {
1215 error = -EFSCORRUPTED;
1221 offset = xfs_inobt_first_free_inode(&rec);
1222 ASSERT(offset >= 0);
1223 ASSERT(offset < XFS_INODES_PER_CHUNK);
1224 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1225 XFS_INODES_PER_CHUNK) == 0);
1226 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1227 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1229 error = xfs_inobt_update(cur, &rec);
1232 be32_add_cpu(&agi->agi_freecount, -1);
1233 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1234 pag->pagi_freecount--;
1236 error = xfs_check_agi_freecount(cur);
1240 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1241 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1245 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
1247 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1252 * Use the free inode btree to allocate an inode based on distance from the
1253 * parent. Note that the provided cursor may be deleted and replaced.
1256 xfs_dialloc_ag_finobt_near(
1258 struct xfs_btree_cur **ocur,
1259 struct xfs_inobt_rec_incore *rec)
1261 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
1262 struct xfs_btree_cur *rcur; /* right search cursor */
1263 struct xfs_inobt_rec_incore rrec;
1267 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
1272 error = xfs_inobt_get_rec(lcur, rec, &i);
1275 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
1276 return -EFSCORRUPTED;
1279 * See if we've landed in the parent inode record. The finobt
1280 * only tracks chunks with at least one free inode, so record
1281 * existence is enough.
1283 if (pagino >= rec->ir_startino &&
1284 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
1288 error = xfs_btree_dup_cursor(lcur, &rcur);
1292 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
1296 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1299 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
1300 error = -EFSCORRUPTED;
1305 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
1306 error = -EFSCORRUPTED;
1309 if (i == 1 && j == 1) {
1311 * Both the left and right records are valid. Choose the closer
1312 * inode chunk to the target.
1314 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
1315 (rrec.ir_startino - pagino)) {
1317 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1320 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1322 } else if (j == 1) {
1323 /* only the right record is valid */
1325 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
1327 } else if (i == 1) {
1328 /* only the left record is valid */
1329 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
1335 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
1340 * Use the free inode btree to find a free inode based on a newino hint. If
1341 * the hint is NULL, find the first free inode in the AG.
1344 xfs_dialloc_ag_finobt_newino(
1345 struct xfs_agi *agi,
1346 struct xfs_btree_cur *cur,
1347 struct xfs_inobt_rec_incore *rec)
1352 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
1353 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
1358 error = xfs_inobt_get_rec(cur, rec, &i);
1361 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1362 return -EFSCORRUPTED;
1368 * Find the first inode available in the AG.
1370 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1373 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1374 return -EFSCORRUPTED;
1376 error = xfs_inobt_get_rec(cur, rec, &i);
1379 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1380 return -EFSCORRUPTED;
1386 * Update the inobt based on a modification made to the finobt. Also ensure that
1387 * the records from both trees are equivalent post-modification.
1390 xfs_dialloc_ag_update_inobt(
1391 struct xfs_btree_cur *cur, /* inobt cursor */
1392 struct xfs_inobt_rec_incore *frec, /* finobt record */
1393 int offset) /* inode offset */
1395 struct xfs_inobt_rec_incore rec;
1399 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1402 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1403 return -EFSCORRUPTED;
1405 error = xfs_inobt_get_rec(cur, &rec, &i);
1408 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
1409 return -EFSCORRUPTED;
1410 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1411 XFS_INODES_PER_CHUNK) == 0);
1413 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1416 if (XFS_IS_CORRUPT(cur->bc_mp,
1417 rec.ir_free != frec->ir_free ||
1418 rec.ir_freecount != frec->ir_freecount))
1419 return -EFSCORRUPTED;
1421 return xfs_inobt_update(cur, &rec);
1425 * Allocate an inode using the free inode btree, if available. Otherwise, fall
1426 * back to the inobt search algorithm.
1428 * The caller selected an AG for us, and made sure that free inodes are
1433 struct xfs_trans *tp,
1434 struct xfs_buf *agbp,
1435 struct xfs_perag *pag,
1439 struct xfs_mount *mp = tp->t_mountp;
1440 struct xfs_agi *agi = agbp->b_addr;
1441 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
1442 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
1443 struct xfs_btree_cur *cur; /* finobt cursor */
1444 struct xfs_btree_cur *icur; /* inobt cursor */
1445 struct xfs_inobt_rec_incore rec;
1451 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
1452 return xfs_dialloc_ag_inobt(tp, agbp, pag, parent, inop);
1455 * If pagino is 0 (this is the root inode allocation) use newino.
1456 * This must work because we've just allocated some.
1459 pagino = be32_to_cpu(agi->agi_newino);
1461 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
1463 error = xfs_check_agi_freecount(cur);
1468 * The search algorithm depends on whether we're in the same AG as the
1469 * parent. If so, find the closest available inode to the parent. If
1470 * not, consider the agi hint or find the first free inode in the AG.
1472 if (pag->pag_agno == pagno)
1473 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
1475 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
1479 offset = xfs_inobt_first_free_inode(&rec);
1480 ASSERT(offset >= 0);
1481 ASSERT(offset < XFS_INODES_PER_CHUNK);
1482 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
1483 XFS_INODES_PER_CHUNK) == 0);
1484 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
1487 * Modify or remove the finobt record.
1489 rec.ir_free &= ~XFS_INOBT_MASK(offset);
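/*
 * The finobt only tracks chunks with at least one free inode, so
 * delete the record if this was its last free inode; otherwise
 * write back the updated record.
 */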
1491 if (rec.ir_freecount)
1492 error = xfs_inobt_update(cur, &rec);
1494 error = xfs_btree_delete(cur, &i);
1499 * The finobt has now been updated appropriately. We haven't updated the
1500 * agi and superblock yet, so we can create an inobt cursor and validate
1501 * the original freecount. If all is well, make the equivalent update to
1502 * the inobt using the finobt record and offset information.
1504 icur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
1506 error = xfs_check_agi_freecount(icur);
1510 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
1515 * Both trees have now been updated. We must update the perag and
1516 * superblock before we can check the freecount for each btree.
1518 be32_add_cpu(&agi->agi_freecount, -1);
1519 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
1520 pag->pagi_freecount--;
1522 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
1524 error = xfs_check_agi_freecount(icur);
1527 error = xfs_check_agi_freecount(cur);
1531 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
1532 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1537 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
1539 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1545 struct xfs_trans **tpp,
1546 struct xfs_buf *agibp)
1548 struct xfs_trans *tp = *tpp;
1549 struct xfs_dquot_acct *dqinfo;
1553 * Hold on to the agibp across the commit so no other allocation can
1554 * come in and take the free inodes we just allocated for our caller.
1556 xfs_trans_bhold(tp, agibp);
1559 * We want the quota changes to be associated with the next transaction,
1560 * NOT this one. So, detach the dqinfo from this and attach it to the
1563 dqinfo = tp->t_dqinfo;
1564 tp->t_dqinfo = NULL;
1566 error = xfs_trans_roll(&tp);
1568 /* Re-attach the quota info that we detached from prev trx. */
1569 tp->t_dqinfo = dqinfo;
1572 * Join the buffer even on commit error so that the buffer is released
1573 * when the caller cancels the transaction and doesn't have to handle
1574 * this error case specially.
1576 xfs_trans_bjoin(tp, agibp);
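/*
 * Round-robin selection of the AG to start an inode allocation search
 * from: advance the per-mount rotor under its lock so successive
 * allocations are spread across the allocation groups.
 */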
1581 static xfs_agnumber_t
1585 xfs_agnumber_t agno;
1587 spin_lock(&mp->m_agirotor_lock);
1588 agno = mp->m_agirotor;
1589 if (++mp->m_agirotor >= mp->m_maxagi)
1591 spin_unlock(&mp->m_agirotor_lock);
1597 xfs_dialloc_good_ag(
1598 struct xfs_trans *tp,
1599 struct xfs_perag *pag,
1604 struct xfs_mount *mp = tp->t_mountp;
1606 xfs_extlen_t longest = 0;
1610 if (!pag->pagi_inodeok)
1613 if (!pag->pagi_init) {
1614 error = xfs_ialloc_pagi_init(mp, tp, pag->pag_agno);
1619 if (pag->pagi_freecount)
1624 if (!pag->pagf_init) {
1625 error = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, flags);
1631 * Check that there is enough free space for the file plus a chunk of
1632 * inodes if we need to allocate some. If this is the first pass across
1633 * the AGs, take into account the potential space needed for alignment
1634 * of inode chunks when checking the longest contiguous free space in
1635 * the AG - this prevents us from getting ENOSPC because we have free
1636 * space larger than ialloc_blks but alignment constraints prevent us
1639 * If we can't find an AG with space for full alignment slack to be
1640 * taken into account, we must be near ENOSPC in all AGs. Hence we
1641 * don't include alignment for the second pass and so if we fail
1642 * allocation due to alignment issues then it is most likely a real
1645 * XXX(dgc): this calculation is now bogus thanks to the per-ag
1646 * reservations that xfs_alloc_fix_freelist() now does via
1647 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will
1648 * be more than large enough for the check below to succeed, but
1649 * xfs_alloc_space_available() will fail because of the non-zero
1650 * metadata reservation and hence we won't actually be able to allocate
1651 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC
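 *
 * The check below requires room for a minimal (possibly sparse) inode
 * chunk, plus alignment slack on the first pass, plus one extra block
 * when the new file is likely to allocate data immediately.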
1654 ineed = M_IGEO(mp)->ialloc_min_blks;
1655 if (flags && ineed > 1)
1656 ineed += M_IGEO(mp)->cluster_align;
1657 longest = pag->pagf_longest;
1659 longest = pag->pagf_flcount > 0;
1660 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
1662 if (pag->pagf_freeblks < needspace + ineed || longest < ineed)
1669 struct xfs_trans **tpp,
1670 struct xfs_perag *pag,
1675 struct xfs_buf *agbp;
1680 * Then read in the AGI buffer and recheck with the AGI buffer
1683 error = xfs_ialloc_read_agi(pag->pag_mount, *tpp, pag->pag_agno, &agbp);
1687 if (!pag->pagi_freecount) {
1693 error = xfs_ialloc_ag_alloc(*tpp, agbp, pag);
1698 * We successfully allocated space for an inode cluster in this
1699 * AG. Roll the transaction so that we can allocate one of the
1702 ASSERT(pag->pagi_freecount > 0);
1703 error = xfs_dialloc_roll(tpp, agbp);
1708 /* Allocate an inode in the found AG */
1709 error = xfs_dialloc_ag(*tpp, agbp, pag, parent, &ino);
1715 xfs_trans_brelse(*tpp, agbp);
1720 * Allocate an on-disk inode.
1722 * Mode is used to tell whether the new inode is a directory and hence where to
1723 * locate it. The on-disk inode that is allocated will be returned in @new_ino
1724 * on success, otherwise an error will be set to indicate the failure (e.g.
1729 struct xfs_trans **tpp,
1734 struct xfs_mount *mp = (*tpp)->t_mountp;
1735 xfs_agnumber_t agno;
1737 xfs_agnumber_t start_agno;
1738 struct xfs_perag *pag;
1739 struct xfs_ino_geometry *igeo = M_IGEO(mp);
1740 bool ok_alloc = true;
1745 * Directories, symlinks, and regular files frequently allocate at least
1746 * one block, so factor in that potential expansion when we examine whether
1747 * an AG has enough space for file creation.
1750 start_agno = xfs_ialloc_next_ag(mp);
1752 start_agno = XFS_INO_TO_AGNO(mp, parent);
1753 if (start_agno >= mp->m_maxagi)
1758 * If we have already hit the ceiling of inode blocks then clear
1759 * ok_alloc so we scan all available agi structures for a free
1762 * Read a rough value of mp->m_icount via percpu_counter_read_positive(),
1763 * which sacrifices precision but improves performance.
1765 if (igeo->maxicount &&
1766 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
1767 > igeo->maxicount) {
1772 * Loop until we find an allocation group that either has free inodes
1773 * or in which we can allocate some inodes. Iterate through the
1774 * allocation groups upward, wrapping at the end.
1777 flags = XFS_ALLOC_FLAG_TRYLOCK;
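/* Only trylock AGF buffers on the first pass across the AGs. */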
1779 pag = xfs_perag_get(mp, agno);
1780 if (xfs_dialloc_good_ag(*tpp, pag, mode, flags, ok_alloc)) {
1781 error = xfs_dialloc_try_ag(tpp, pag, parent,
1783 if (error != -EAGAIN)
1787 if (XFS_FORCED_SHUTDOWN(mp)) {
1788 error = -EFSCORRUPTED;
1791 if (++agno == mp->m_maxagi)
1793 if (agno == start_agno) {
1810 * Free the blocks of an inode chunk. We must consider that the inode chunk
1811 * might be sparse and only free the regions that are allocated as part of the
1815 xfs_difree_inode_chunk(
1816 struct xfs_trans *tp,
1817 xfs_agnumber_t agno,
1818 struct xfs_inobt_rec_incore *rec)
1820 struct xfs_mount *mp = tp->t_mountp;
1821 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
1823 int startidx, endidx;
1825 xfs_agblock_t agbno;
1827 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
1829 if (!xfs_inobt_issparse(rec->ir_holemask)) {
1830 /* not sparse, calculate extent info directly */
1831 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
1832 M_IGEO(mp)->ialloc_blks,
1833 &XFS_RMAP_OINFO_INODES);
1837 /* holemask is only 16-bits (fits in an unsigned long) */
1838 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
1839 holemask[0] = rec->ir_holemask;
1842 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
1843 * holemask and convert the start/end index of each range to an extent.
1844 * We start with the start and end index both pointing at the first 0 in
1847 startidx = endidx = find_first_zero_bit(holemask,
1848 XFS_INOBT_HOLEMASK_BITS);
1849 nextbit = startidx + 1;
1850 while (startidx < XFS_INOBT_HOLEMASK_BITS) {
1851 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
1854 * If the next zero bit is contiguous, update the end index of
1855 * the current range and continue.
1857 if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
1858 nextbit == endidx + 1) {
1864 * nextbit is not contiguous with the current end index. Convert
1865 * the current start/end to an extent and add it to the free
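 * For example, with 8 inodes per block, a run of four clear bits
 * (4 * XFS_INODES_PER_HOLEMASK_BIT = 16 inodes) becomes a two block
 * extent starting startidx / 2 blocks into the chunk.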
1868 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
1869 mp->m_sb.sb_inopblock;
1870 contigblk = ((endidx - startidx + 1) *
1871 XFS_INODES_PER_HOLEMASK_BIT) /
1872 mp->m_sb.sb_inopblock;
1874 ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
1875 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
1876 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
1877 contigblk, &XFS_RMAP_OINFO_INODES);
1879 /* reset range to current bit and carry on... */
1880 startidx = endidx = nextbit;
1889 struct xfs_mount *mp,
1890 struct xfs_trans *tp,
1891 struct xfs_buf *agbp,
1892 struct xfs_perag *pag,
1894 struct xfs_icluster *xic,
1895 struct xfs_inobt_rec_incore *orec)
1897 struct xfs_agi *agi = agbp->b_addr;
1898 struct xfs_btree_cur *cur;
1899 struct xfs_inobt_rec_incore rec;
1905 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
1906 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
1909 * Initialize the cursor.
1911 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
1913 error = xfs_check_agi_freecount(cur);
1918 * Look for the entry describing this inode.
1920 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
1921 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
1925 if (XFS_IS_CORRUPT(mp, i != 1)) {
1926 error = -EFSCORRUPTED;
1929 error = xfs_inobt_get_rec(cur, &rec, &i);
1931 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1935 if (XFS_IS_CORRUPT(mp, i != 1)) {
1936 error = -EFSCORRUPTED;
1940 * Get the offset in the inode chunk.
1942 off = agino - rec.ir_startino;
1943 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
1944 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
1946 * Mark the inode free & increment the count.
1948 rec.ir_free |= XFS_INOBT_MASK(off);
1952 * When an inode chunk is free, it becomes eligible for removal. Don't
1953 * remove the chunk if the block size is large enough for multiple inode
1954 * chunks (that might not be free).
1956 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1957 rec.ir_free == XFS_INOBT_ALL_FREE &&
1958 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1959 struct xfs_perag *pag = agbp->b_pag;
1961 xic->deleted = true;
1962 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
1964 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1967 * Remove the inode cluster from the AGI B+Tree, adjust the
1968 * AGI and Superblock inode counts, and mark the disk space
1969 * to be freed when the transaction is committed.
1971 ilen = rec.ir_freecount;
1972 be32_add_cpu(&agi->agi_count, -ilen);
1973 be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
1974 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
1975 pag->pagi_freecount -= ilen - 1;
1976 pag->pagi_count -= ilen;
1977 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
1978 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
1980 if ((error = xfs_btree_delete(cur, &i))) {
1981 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
1986 xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
1988 xic->deleted = false;
1990 error = xfs_inobt_update(cur, &rec);
1992 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
1998 * Change the inode free counts and log the ag/sb changes.
2000 be32_add_cpu(&agi->agi_freecount, 1);
2001 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
2002 pag->pagi_freecount++;
2003 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
2006 error = xfs_check_agi_freecount(cur);
2011 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2015 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2020 * Free an inode in the free inode btree.
2024 struct xfs_mount *mp,
2025 struct xfs_trans *tp,
2026 struct xfs_buf *agbp,
2027 struct xfs_perag *pag,
2029 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
2031 struct xfs_btree_cur *cur;
2032 struct xfs_inobt_rec_incore rec;
2033 int offset = agino - ibtrec->ir_startino;
2037 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);
2039 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
2044 * If the record does not exist in the finobt, we must have just
2045 * freed an inode in a previously fully allocated chunk. If not,
2046 * something is out of sync.
2048 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
2049 error = -EFSCORRUPTED;
2053 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
2055 ibtrec->ir_freecount,
2056 ibtrec->ir_free, &i);
2065 * Read and update the existing record. We could just copy the ibtrec
2066 * across here, but that would defeat the purpose of having redundant
2067 * metadata. By making the modifications independently, we can catch
2068 * corruptions that we wouldn't see if we just copied from one record
2071 error = xfs_inobt_get_rec(cur, &rec, &i);
2074 if (XFS_IS_CORRUPT(mp, i != 1)) {
2075 error = -EFSCORRUPTED;
2079 rec.ir_free |= XFS_INOBT_MASK(offset);
2082 if (XFS_IS_CORRUPT(mp,
2083 rec.ir_free != ibtrec->ir_free ||
2084 rec.ir_freecount != ibtrec->ir_freecount)) {
2085 error = -EFSCORRUPTED;
2090 * The content of inobt records should always match between the inobt
2091 * and finobt. The lifecycle of records in the finobt is different from
2092 * the inobt in that the finobt only tracks records with at least one
2093 * free inode. Hence, if all of the inodes are free and we aren't
2094 * keeping inode chunks permanently on disk, remove the record.
2095 * Otherwise, update the record with the new information.
2097 * Note that we currently can't free chunks when the block size is large
2098 * enough for multiple chunks. Leave the finobt record to remain in sync
2101 if (rec.ir_free == XFS_INOBT_ALL_FREE &&
2102 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
2103 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
2104 error = xfs_btree_delete(cur, &i);
2109 error = xfs_inobt_update(cur, &rec);
2115 error = xfs_check_agi_freecount(cur);
2119 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
2123 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
2128 * Free disk inode. Carefully avoids touching the incore inode, all
2129 * manipulations incore are the caller's responsibility.
2130 * The on-disk inode is not changed by this operation, only the
2131 * btree (free inode mask) is changed.
2135 struct xfs_trans *tp,
2136 struct xfs_perag *pag,
2138 struct xfs_icluster *xic)
2141 xfs_agblock_t agbno; /* block number containing inode */
2142 struct xfs_buf *agbp; /* buffer for allocation group header */
2143 xfs_agino_t agino; /* allocation group inode number */
2144 int error; /* error return value */
2145 struct xfs_mount *mp = tp->t_mountp;
2146 struct xfs_inobt_rec_incore rec;/* btree record */
2149 * Break up inode number into its components.
2151 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) {
2152 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).",
2153 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno);
2157 agino = XFS_INO_TO_AGINO(mp, inode);
2158 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2159 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
2160 __func__, (unsigned long long)inode,
2161 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2165 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2166 if (agbno >= mp->m_sb.sb_agblocks) {
2167 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
2168 __func__, agbno, mp->m_sb.sb_agblocks);
2173 * Get the allocation group header.
2175 error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
2177 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
2183 * Fix up the inode allocation btree.
2185 error = xfs_difree_inobt(mp, tp, agbp, pag, agino, xic, &rec);
2190 * Fix up the free inode btree.
2192 if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
2193 error = xfs_difree_finobt(mp, tp, agbp, pag, agino, &rec);
2206 struct xfs_mount *mp,
2207 struct xfs_trans *tp,
2208 struct xfs_perag *pag,
2210 xfs_agblock_t agbno,
2211 xfs_agblock_t *chunk_agbno,
2212 xfs_agblock_t *offset_agbno,
2215 struct xfs_inobt_rec_incore rec;
2216 struct xfs_btree_cur *cur;
2217 struct xfs_buf *agbp;
2221 error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
2224 "%s: xfs_ialloc_read_agi() returned error %d, agno %d",
2225 __func__, error, pag->pag_agno);
2230 * Lookup the inode record for the given agino. If the record cannot be
2231 * found, then it's an invalid inode number and we should abort. Once
2232 * we have a record, we need to ensure it contains the inode number
2233 * we are looking up.
2235 cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
2236 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
2239 error = xfs_inobt_get_rec(cur, &rec, &i);
2240 if (!error && i == 0)
2244 xfs_trans_brelse(tp, agbp);
2245 xfs_btree_del_cursor(cur, error);
2249 /* check that the returned record contains the required inode */
2250 if (rec.ir_startino > agino ||
2251 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
2254 /* for untrusted inodes check it is allocated first */
2255 if ((flags & XFS_IGET_UNTRUSTED) &&
2256 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
2259 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
2260 *offset_agbno = agbno - *chunk_agbno;
2265 * Return the location of the inode in imap, for mapping it into a buffer.
2269 struct xfs_mount *mp, /* file system mount structure */
2270 struct xfs_trans *tp, /* transaction pointer */
2271 xfs_ino_t ino, /* inode to locate */
2272 struct xfs_imap *imap, /* location map structure */
2273 uint flags) /* flags for inode btree lookup */
2275 xfs_agblock_t agbno; /* block number of inode in the alloc group */
2276 xfs_agino_t agino; /* inode number within alloc group */
2277 xfs_agblock_t chunk_agbno; /* first block in inode chunk */
2278 xfs_agblock_t cluster_agbno; /* first block in inode cluster */
2279 int error; /* error code */
2280 int offset; /* index of inode in its buffer */
2281 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
2282 struct xfs_perag *pag;
2284 ASSERT(ino != NULLFSINO);
2287 * Split up the inode number into its parts.
2289 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
2290 agino = XFS_INO_TO_AGINO(mp, ino);
2291 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2292 if (!pag || agbno >= mp->m_sb.sb_agblocks ||
2293 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2297 * Don't output diagnostic information for untrusted inodes
2298 * as they can be invalid without implying corruption.
2300 if (flags & XFS_IGET_UNTRUSTED)
2304 "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2305 __func__, XFS_INO_TO_AGNO(mp, ino),
2306 mp->m_sb.sb_agcount);
2308 if (agbno >= mp->m_sb.sb_agblocks) {
2310 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2311 __func__, (unsigned long long)agbno,
2312 (unsigned long)mp->m_sb.sb_agblocks);
2314 if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2316 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2318 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2326 * For bulkstat and handle lookups, we have an untrusted inode number
2327 * that we have to verify is valid. We cannot do this just by reading
2328 * the inode buffer as it may have been unlinked and removed leaving
2329 * inodes in stale state on disk. Hence we have to do a btree lookup
2330 * in all cases where an untrusted inode number is passed.
2332 if (flags & XFS_IGET_UNTRUSTED) {
2333 error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
2334 &chunk_agbno, &offset_agbno, flags);
2341 * If the inode cluster size is the same as the blocksize or
2342 * smaller, we get to the buffer by simple arithmetic.
2344 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2345 offset = XFS_INO_TO_OFFSET(mp, ino);
2346 ASSERT(offset < mp->m_sb.sb_inopblock);
2348 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
2349 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2350 imap->im_boffset = (unsigned short)(offset <<
2351 mp->m_sb.sb_inodelog);
2357 * If the inode chunks are aligned then use simple maths to
2358 * find the location. Otherwise we have to do a btree
2359 * lookup to find the location.
2361 if (M_IGEO(mp)->inoalign_mask) {
2362 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
2363 chunk_agbno = agbno - offset_agbno;
2365 error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
2366 &chunk_agbno, &offset_agbno, flags);
2372 ASSERT(agbno >= chunk_agbno);
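/*
 * Round the offset into the chunk down to a cluster boundary to find the
 * cluster holding this inode, then compute the inode's index within that
 * cluster buffer.
 */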
2373 cluster_agbno = chunk_agbno +
2374 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
2375 M_IGEO(mp)->blocks_per_cluster);
2376 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
2377 XFS_INO_TO_OFFSET(mp, ino);
2379 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
2380 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
2381 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
2384 * If the inode number maps to a block outside the bounds
2385 * of the file system then return NULL rather than calling
2386 * read_buf and panicking when we get an error from the
2389 if ((imap->im_blkno + imap->im_len) >
2390 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2392 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
2393 __func__, (unsigned long long) imap->im_blkno,
2394 (unsigned long long) imap->im_len,
2395 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
/*
 * Log specified fields for the ag hdr (inode section). The growth of the agi
 * structure over time requires that we interpret the buffer as two logical
 * regions delineated by the end of the unlinked list. This is due to the size
 * of the hash table and its location in the middle of the agi.
 *
 * For example, a request to log a field before agi_unlinked and a field after
 * agi_unlinked could cause us to log the entire hash table and use an
 * excessive amount of log space. To avoid this behavior, log the region up
 * through agi_unlinked in one call and the region after agi_unlinked through
 * the end of the structure in another.
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	struct xfs_buf	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
					/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		offsetof(xfs_agi_t, agi_free_root),
		offsetof(xfs_agi_t, agi_free_level),
		offsetof(xfs_agi_t, agi_iblocks),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	struct xfs_agi		*agi = bp->b_addr;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif

	/*
	 * Compute byte offsets for the first and last fields in the first
	 * region and log the agi buffer. This only logs up through
	 * agi_unlinked.
	 */
	if (fields & XFS_AGI_ALL_BITS_R1) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}

	/*
	 * Mask off the bits in the first region and calculate the first and
	 * last field offsets for any bits in the second region.
	 */
	fields &= ~XFS_AGI_ALL_BITS_R1;
	if (fields) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}
}
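/*
 * Usage sketch (hypothetical caller, not taken from this file): logging one
 * field from each region, e.g.
 *
 *	xfs_ialloc_log_agi(tp, agibp, XFS_AGI_FREECOUNT | XFS_AGI_FREE_ROOT);
 *
 * issues two xfs_trans_log_buf() calls, one for the range covering
 * agi_freecount in the first region and one for the range covering
 * agi_free_root in the second, instead of a single range that would drag the
 * whole agi_unlinked hash table into the log.
 */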
static xfs_failaddr_t
xfs_agi_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agi	*agi = bp->b_addr;
	int		i;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
			return __this_address;
	}

	/*
	 * Validate the magic number of the agi block.
	 */
	if (!xfs_verify_magic(bp, agi->agi_magicnum))
		return __this_address;
	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
		return __this_address;

	if (be32_to_cpu(agi->agi_level) < 1 ||
	    be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    (be32_to_cpu(agi->agi_free_level) < 1 ||
	     be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
		return __this_address;

	/*
	 * During growfs operations, the perag is not fully initialised, so we
	 * can't use it for any useful checking. growfs ensures we can't use
	 * it by using uncached buffers that don't have the perag attached, so
	 * we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
			continue;
		if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
			return __this_address;
	}

	return NULL;
}
static void
xfs_agi_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agi_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agi_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_agi		*agi = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_agi_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
}

const struct xfs_buf_ops xfs_agi_buf_ops = {
	.name = "xfs_agi",
	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
	.verify_read = xfs_agi_read_verify,
	.verify_write = xfs_agi_write_verify,
	.verify_struct = xfs_agi_verify,
};
/*
 * Read in the allocation group header (inode allocation section).
 */
int
xfs_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	int			error;

	trace_xfs_read_agi(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
	if (error)
		return error;
	if (tp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);

	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
	return 0;
}
int
xfs_ialloc_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	trace_xfs_ialloc_read_agi(mp, agno);

	error = xfs_read_agi(mp, tp, agno, bpp);
	if (error)
		return error;

	agi = (*bpp)->b_addr;
	pag = (*bpp)->b_pag;
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	}

	/*
	 * It's possible for these to be out of sync if
	 * we are in the middle of a forced shutdown.
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
	return 0;
}

/*
 * Read in the agi to initialise the per-ag data in the mount structure.
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno)		/* allocation group number */
{
	struct xfs_buf	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}
/* Is there an inode record covering a given range of inode numbers? */
int
xfs_ialloc_has_inode_record(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		low,
	xfs_agino_t		high,
	bool			*exists)
{
	struct xfs_inobt_rec_incore	irec;
	xfs_agino_t		agino;
	uint16_t		holemask;
	int			has_record;
	int			i;
	int			error;

	*exists = false;
	error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
	while (error == 0 && has_record) {
		error = xfs_inobt_get_rec(cur, &irec, &has_record);
		if (error || irec.ir_startino > high)
			break;

		agino = irec.ir_startino;
		holemask = irec.ir_holemask;
		for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
				i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
			if (holemask & 1)
				continue;
			if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
					agino <= high) {
				*exists = true;
				return 0;
			}
		}

		error = xfs_btree_increment(cur, 0, &has_record);
	}
	return error;
}
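/*
 * Holemask example (illustrative numbers only): each holemask bit covers
 * XFS_INODES_PER_HOLEMASK_BIT (4) inodes, so a record with ir_startino = 128
 * and ir_holemask = 0xff00 describes inodes 128-159 as physically present
 * and marks 160-191 as a sparse hole.  A query with low = 160 and high = 191
 * walks the loop above without finding an allocated region in range and
 * reports *exists = false.
 */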
/* Is there an inode record covering a given extent? */
int
xfs_ialloc_has_inodes_at_extent(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	xfs_agino_t		low;
	xfs_agino_t		high;

	low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
	high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;

	return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
struct xfs_ialloc_count_inodes {
	xfs_agino_t			count;
	xfs_agino_t			freecount;
};

/* Record inode counts across all inobt records. */
STATIC int
xfs_ialloc_count_inodes_rec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_inobt_rec_incore	irec;
	struct xfs_ialloc_count_inodes	*ci = priv;

	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
	ci->count += irec.ir_count;
	ci->freecount += irec.ir_freecount;

	return 0;
}
/* Count allocated and free inodes under an inobt. */
int
xfs_ialloc_count_inodes(
	struct xfs_btree_cur		*cur,
	xfs_agino_t			*count,
	xfs_agino_t			*freecount)
{
	struct xfs_ialloc_count_inodes	ci = {0};
	int				error;

	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
	error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
	if (error)
		return error;

	*count = ci.count;
	*freecount = ci.freecount;
	return 0;
}
/*
 * Initialize inode-related geometry information.
 *
 * Compute the inode btree min and max levels and set maxicount.
 *
 * Set the inode cluster size. This may still be overridden by the file
 * system block size if it is larger than the chosen cluster size.
 *
 * For v5 filesystems, scale the cluster size with the inode size to keep a
 * constant ratio of inode per cluster buffer, but only if mkfs has set the
 * inode alignment value appropriately for larger cluster sizes.
 *
 * Then compute the inode cluster alignment information.
 */
void
xfs_ialloc_setup_geometry(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		icount;
	uint			inodes;

	igeo->new_diflags2 = 0;
	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;

	/* Compute inode btree geometry. */
	igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
	igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;

	igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
			sbp->sb_inopblock);
	igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;

	if (sbp->sb_spino_align)
		igeo->ialloc_min_blks = sbp->sb_spino_align;
	else
		igeo->ialloc_min_blks = igeo->ialloc_blks;

	/* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
	igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
			inodes);
	/*
	 * Set the maximum inode count for this filesystem, being careful not
	 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
	 * users should never get here due to failing sb verification, but
	 * certain users (xfs_db) need to be usable even with corrupt metadata.
	 */
	if (sbp->sb_imax_pct && igeo->ialloc_blks) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, igeo->ialloc_blks);
		igeo->maxicount = XFS_FSB_TO_INO(mp,
				icount * igeo->ialloc_blks);
	} else {
		igeo->maxicount = 0;
	}
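	/*
	 * Example of the calculation above (made-up numbers): sb_dblocks =
	 * 1000000, sb_imax_pct = 25 and ialloc_blks = 4 give icount =
	 * 250000 / 4 = 62500 allocation units, so maxicount =
	 * XFS_FSB_TO_INO(mp, 62500 * 4), i.e. the 25% cap rounded down to a
	 * whole inode allocation unit before converting blocks to inodes.
	 */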
	/*
	 * Compute the desired size of an inode cluster buffer, which starts
	 * at 8K and (on v5 filesystems) scales up with larger inode sizes.
	 *
	 * Preserve the desired inode cluster size because the sparse inodes
	 * feature uses that desired size (not the actual size) to compute the
	 * sparse inode alignment. The mount code validates this value, so we
	 * cannot change the behavior.
	 */
	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		int	new_size = igeo->inode_cluster_size_raw;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			igeo->inode_cluster_size_raw = new_size;
	}
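	/*
	 * Example of the v5 scaling above (assumed geometry): with 1024-byte
	 * inodes, new_size = 8192 * (1024 / 256) = 32768 bytes.  The larger
	 * cluster is only used if mkfs set sb_inoalignmt to at least
	 * XFS_B_TO_FSBT(mp, 32768) filesystem blocks; otherwise
	 * inode_cluster_size_raw stays at 8K.
	 */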
	/* Calculate inode cluster ratios. */
	if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
		igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
				igeo->inode_cluster_size_raw);
	else
		igeo->blocks_per_cluster = 1;
	igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
	igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);

	/* Calculate inode cluster alignment. */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
		igeo->cluster_align = mp->m_sb.sb_inoalignmt;
	else
		igeo->cluster_align = 1;
	igeo->inoalign_mask = igeo->cluster_align - 1;
	igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
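	/*
	 * Example of the alignment results above (assumed geometry): 4k
	 * blocks, 512-byte inodes (sb_inopblog = 3), sb_inoalignmt = 8 and
	 * blocks_per_cluster = 4 give cluster_align = 8 blocks,
	 * inoalign_mask = 0x7 and cluster_align_inodes = 8 << 3 = 64 inodes.
	 */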
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment.
	 */
	if (mp->m_dalign && igeo->inoalign_mask &&
	    !(mp->m_dalign & igeo->inoalign_mask))
		igeo->ialloc_align = mp->m_dalign;
	else
		igeo->ialloc_align = 0;
}
/* Compute the location of the root directory inode that is laid out by mkfs. */
xfs_ino_t
xfs_ialloc_calc_rootino(
	struct xfs_mount	*mp,
	int			sunit)
{
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agblock_t		first_bno;

	/*
	 * Pre-calculate the geometry of AG 0. We know what it looks like
	 * because libxfs knows how to create allocation groups now.
	 *
	 * first_bno is the first block in which mkfs could possibly have
	 * allocated the root directory inode, once we factor in the metadata
	 * that mkfs formats before it. Namely, the four AG headers...
	 */
	first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);

	/* ...the two free space btree roots... */
	first_bno += 2;

	/* ...the inode btree root... */
	first_bno++;

	/* ...the initial AGFL... */
	first_bno += xfs_alloc_min_freelist(mp, NULL);

	/* ...the free inode btree root... */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		first_bno++;

	/* ...the reverse mapping btree root... */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		first_bno++;

	/* ...the reference count btree... */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		first_bno++;

	/*
	 * ...and the log, if it is allocated in the first allocation group.
	 *
	 * This can happen with filesystems that only have a single
	 * allocation group, or very odd geometries created by old mkfs
	 * versions on very small filesystems.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
		first_bno += mp->m_sb.sb_logblocks;

	/*
	 * Now round first_bno up to whatever allocation alignment is given
	 * by the filesystem or was passed in.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
		first_bno = roundup(first_bno, sunit);
	else if (xfs_sb_version_hasalign(&mp->m_sb) &&
			mp->m_sb.sb_inoalignmt > 1)
		first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);

	return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
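/*
 * Illustrative walk-through (assumed mkfs geometry, not a general rule):
 * with 512-byte sectors and 4k blocks, the four AG headers occupy
 * howmany(2048, 4096) = 1 block; adding two free space btree roots, the
 * inobt root, a hypothetical 4-block minimum AGFL and a finobt root gives
 * first_bno = 9, and rounding up to an inode alignment of 8 puts the root
 * directory inode in block 16 of AG 0.
 */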
/*
 * Ensure there are not sparse inode clusters that cross the new EOAG.
 *
 * This is a no-op for non-sparse-inode filesystems since clusters are always
 * fully allocated and checking the bnobt suffices. However, a sparse inode
 * filesystem could have a record where the upper inodes are free blocks.
 * If those blocks were removed from the filesystem, the inode record would
 * extend beyond EOAG, which will be flagged as corruption.
 */
int
xfs_ialloc_check_shrink(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		*agibp,
	xfs_agblock_t		new_length)
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	xfs_agino_t		agino = XFS_AGB_TO_AGINO(mp, new_length);
	int			has;
	int			error;

	if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
		return 0;

	pag = xfs_perag_get(mp, agno);
	cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO);

	/* Look up the inobt record that would correspond to the new EOFS. */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
	if (error || !has)
		goto out;

	error = xfs_inobt_get_rec(cur, &rec, &has);
	if (error)
		goto out;

	if (!has) {
		error = -EFSCORRUPTED;
		goto out;
	}

	/* If the record covers inodes that would be beyond EOFS, bail out. */
	if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
		error = -ENOSPC;
		goto out;
	}
out:
	xfs_btree_del_cursor(cur, error);
	xfs_perag_put(pag);
	return error;
}
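/*
 * Worked example (hypothetical numbers): with sparse inodes enabled, a
 * record at ir_startino = 512 spans aginos 512-575 even if its upper half is
 * a sparse hole.  If the shrunk AG length maps to agino 544, then
 * 512 + XFS_INODES_PER_CHUNK > 544, so the record would cross the new EOAG
 * and the shrink is refused.
 */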