// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_icreate_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
/*
 * Lookup a record by ino in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	xfs_lookup_t		dir,	/* <=, >=, == */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_holemask = 0;
	cur->bc_rec.i.ir_count = 0;
	cur->bc_rec.i.ir_freecount = 0;
	cur->bc_rec.i.ir_free = 0;
	return xfs_btree_lookup(cur, dir, stat);
}
/*
 * Update the record referred to by cur to the value given.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_inobt_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_inobt_rec_incore_t	*irec)	/* btree record */
{
	union xfs_btree_rec	rec;

	rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
		rec.inobt.ir_u.sp.ir_count = irec->ir_count;
		rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
	}
	rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
	return xfs_btree_update(cur, &rec);
}
/* Convert on-disk btree record to incore inobt record. */
void
xfs_inobt_btrec_to_irec(
	struct xfs_mount		*mp,
	union xfs_btree_rec		*rec,
	struct xfs_inobt_rec_incore	*irec)
{
	irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
		irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
		irec->ir_count = rec->inobt.ir_u.sp.ir_count;
		irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
	} else {
		/*
		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
		 * values for full inode chunks.
		 */
		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
		irec->ir_count = XFS_INODES_PER_CHUNK;
		irec->ir_freecount =
				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
	}
	irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
}
/*
 * Get the data from the pointed-to record.
 */
int
xfs_inobt_get_rec(
	struct xfs_btree_cur		*cur,
	struct xfs_inobt_rec_incore	*irec,
	int				*stat)
{
	struct xfs_mount		*mp = cur->bc_mp;
	xfs_agnumber_t			agno = cur->bc_ag.agno;
	union xfs_btree_rec		*rec;
	int				error;
	uint64_t			realfree;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || *stat == 0)
		return error;

	xfs_inobt_btrec_to_irec(mp, rec, irec);

	if (!xfs_verify_agino(mp, agno, irec->ir_startino))
		goto out_bad_rec;
	if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
	    irec->ir_count > XFS_INODES_PER_CHUNK)
		goto out_bad_rec;
	if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
		goto out_bad_rec;

	/*
	 * Compute the mask of genuinely free inodes: for a sparse chunk,
	 * mask off the unallocated (hole) regions first.
	 */
	if (!xfs_inobt_issparse(irec->ir_holemask))
		realfree = irec->ir_free;
	else
		realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
	if (hweight64(realfree) != irec->ir_freecount)
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Inode BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
	xfs_warn(mp,
"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
		irec->ir_startino, irec->ir_count, irec->ir_freecount,
		irec->ir_free, irec->ir_holemask);
	return -EFSCORRUPTED;
}
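/*
 * Worked example of the sanity check above (the geometry is hypothetical):
 * with XFS_INODES_PER_CHUNK == 64 and XFS_INODES_PER_HOLEMASK_BIT == 4, a
 * sparse record with ir_holemask == 0xff00 has its upper 32 inodes
 * unallocated. xfs_inobt_irec_to_allocmask() expands that holemask to the
 * 64-bit allocation bitmap 0x00000000ffffffff, so
 *
 *	realfree = irec->ir_free & 0x00000000ffffffffULL;
 *
 * and hweight64(realfree) must equal ir_freecount or the record is
 * considered corrupt.
 */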
/*
 * Insert a single inobt record. Cursor must already point to desired location.
 */
int
xfs_inobt_insert_rec(
	struct xfs_btree_cur	*cur,
	uint16_t		holemask,
	uint8_t			count,
	int32_t			freecount,
	xfs_inofree_t		free,
	int			*stat)
{
	cur->bc_rec.i.ir_holemask = holemask;
	cur->bc_rec.i.ir_count = count;
	cur->bc_rec.i.ir_freecount = freecount;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_insert(cur, stat);
}

/*
 * Insert records describing a newly allocated inode chunk into the inobt.
 */
STATIC int
xfs_inobt_insert(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_agino_t		newino,
	xfs_agino_t		newlen,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	xfs_agino_t		thisino;
	int			i;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
					     XFS_INODES_PER_CHUNK,
					     XFS_INODES_PER_CHUNK,
					     XFS_INOBT_ALL_FREE, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;
}
/*
 * Verify that the number of free inodes in the AGI is correct.
 */
#ifdef DEBUG
STATIC int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur,
	struct xfs_agi		*agi)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t rec;
		int		freecount = 0;
		int		error;
		int		i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;
		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;
			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
			ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur, agi)	0
#endif
/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	int			error;

	/*
	 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	nbufs = length / M_IGEO(mp)->blocks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create. If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version. Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inode we log the entire buffer rather than just the
	 * inode cores.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

		/*
		 * log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
				(j * M_IGEO(mp)->blocks_per_cluster));
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
				XBF_UNMAPPED, &fbuf);
		if (error)
			return error;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;
			uint	isize = XFS_DINODE_SIZE(&mp->m_sb);

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
						  ioffset + isize - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in AIL at the point of this allocation
			 * transaction. This ensures that they are on disk
			 * before the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from moving
			 * it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that it is
				 * not physically logged in the transaction but
				 * still tracked in the AIL as part of the
				 * transaction and pins the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}
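/*
 * A worked example of the cluster arithmetic above (hypothetical geometry):
 * with 4k blocks, 512 byte inodes and a 16k inode cluster,
 * blocks_per_cluster == 4 and inodes_per_cluster == 32. Initialising a
 * 64-inode chunk (length == 8 blocks) therefore walks nbufs == 2 cluster
 * buffers, and inode i within a cluster buffer lives at byte offset
 * (i << sb_inodelog) == i * 512.
 */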
/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from cluster
 * size to inode chunk size. This means that the minimum range between two
 * non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without need to worry about whether the resulting inode record overlaps with
 * another record in the tree. Without this basic rule, we would have to deal
 * with the consequences of overlap by potentially undoing recent allocations in
 * the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in hope that sparse inode chunks fill to full chunks over time.
 */
STATIC void
xfs_align_sparse_ino(
	struct xfs_mount		*mp,
	xfs_agino_t			*startino,
	uint16_t			*allocmask)
{
	xfs_agblock_t			agbno;
	xfs_agblock_t			mod;
	int				offset;

	agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
	mod = agbno % mp->m_sb.sb_inoalignmt;
	if (!mod)
		return;

	/* calculate the inode offset and align startino */
	offset = XFS_AGB_TO_AGINO(mp, mod);
	*startino -= offset;

	/*
	 * Since startino has been aligned down, left shift allocmask such that
	 * it continues to represent the same physical inodes relative to the
	 * new startino.
	 */
	*allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
}
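/*
 * Worked example (hypothetical geometry): with 4k blocks, 512 byte inodes
 * and sb_inoalignmt == 8, a sparse chunk allocated at agbno 12 has
 * mod == 4, so offset == XFS_AGB_TO_AGINO(mp, 4) == 32 inodes. startino
 * is pulled back by 32 to the chunk-aligned boundary and, with
 * XFS_INODES_PER_HOLEMASK_BIT == 4, allocmask is shifted left by
 * 32 / 4 == 8 bits so its set bits still describe the same physical
 * inodes.
 */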
/*
 * Determine whether the source inode record can merge into the target. Both
 * records must be sparse, the inode ranges must match and there must be no
 * allocation overlap between the records.
 */
STATIC bool
__xfs_inobt_can_merge(
	struct xfs_inobt_rec_incore	*trec,	/* tgt record */
	struct xfs_inobt_rec_incore	*srec)	/* src record */
{
	uint64_t			talloc;
	uint64_t			salloc;

	/* records must cover the same inode range */
	if (trec->ir_startino != srec->ir_startino)
		return false;
	/* both records must be sparse */
	if (!xfs_inobt_issparse(trec->ir_holemask) ||
	    !xfs_inobt_issparse(srec->ir_holemask))
		return false;
	/* both records must track some inodes */
	if (!trec->ir_count || !srec->ir_count)
		return false;
	/* can't exceed capacity of a full record */
	if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
		return false;

	/* verify there is no allocation overlap between the records */
	talloc = xfs_inobt_irec_to_allocmask(trec);
	salloc = xfs_inobt_irec_to_allocmask(srec);
	if (talloc & salloc)
		return false;

	return true;
}
/*
 * Merge the source inode record into the target. The caller must call
 * __xfs_inobt_can_merge() to ensure the merge is valid.
 */
STATIC void
__xfs_inobt_rec_merge(
	struct xfs_inobt_rec_incore	*trec,	/* target */
	struct xfs_inobt_rec_incore	*srec)	/* src */
{
	ASSERT(trec->ir_startino == srec->ir_startino);

	/* combine the counts */
	trec->ir_count += srec->ir_count;
	trec->ir_freecount += srec->ir_freecount;

	/*
	 * Merge the holemask and free mask. For both fields, 0 bits refer to
	 * allocated inodes. We combine the allocated ranges with bitwise AND.
	 */
	trec->ir_holemask &= srec->ir_holemask;
	trec->ir_free &= srec->ir_free;
}
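/*
 * Example of a legal merge (a sketch with hypothetical values): two sparse
 * records at the same startino, one covering each half of the chunk:
 *
 *	trec: ir_holemask 0xff00, ir_count 32, ir_freecount 32
 *	srec: ir_holemask 0x00ff, ir_count 32, ir_freecount 32
 *	both: ir_free XFS_INOBT_ALL_FREE (hole bits stay set in ir_free)
 *
 * __xfs_inobt_can_merge() passes because allocmask(trec) ==
 * 0x00000000ffffffff and allocmask(srec) == 0xffffffff00000000 do not
 * intersect, the startinos match and the counts sum to 64. The merge then
 * ANDs the holemasks to 0 (a full chunk) and leaves ir_count == 64,
 * ir_freecount == 64.
 */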
/*
 * Insert a new sparse inode chunk into the associated inode btree. The inode
 * record for the sparse chunk is pre-aligned to a startino that should match
 * any pre-existing sparse inode record in the tree. This allows sparse chunks
 * to fill over time.
 *
 * This function supports two modes of handling preexisting records depending on
 * the merge flag. If merge is true, the provided record is merged with the
 * existing record and updated in place. The merged record is returned in nrec.
 * If merge is false, an existing record is replaced with the provided record.
 * If no preexisting record exists, the provided record is always inserted.
 *
 * It is considered corruption if a merge is requested and not possible. Given
 * the sparse inode alignment constraints, this should never happen.
 */
STATIC int
xfs_inobt_insert_sprec(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	struct xfs_perag		*pag,
	xfs_btnum_t			btnum,
	struct xfs_inobt_rec_incore	*nrec,	/* in/out: new/merged rec. */
	bool				merge)	/* merge or replace */
{
	struct xfs_btree_cur		*cur;
	int				error;
	int				i;
	struct xfs_inobt_rec_incore	rec;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);

	/* the new record is pre-aligned so we know where to look */
	error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	/* if nothing there, insert a new record and return */
	if (i == 0) {
		error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
					     nrec->ir_count, nrec->ir_freecount,
					     nrec->ir_free, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		goto out;
	}

	/*
	 * A record exists at this startino. Merge or replace the record
	 * depending on what we've been asked to do.
	 */
	if (merge) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}
		if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		/*
		 * This should never fail. If we have coexisting records that
		 * cannot merge, something is seriously wrong.
		 */
		if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
			error = -EFSCORRUPTED;
			goto error;
		}

		trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,
					 rec.ir_holemask, nrec->ir_startino,
					 nrec->ir_holemask);

		/* merge to nrec to output the updated record */
		__xfs_inobt_rec_merge(nrec, &rec);

		trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,
					  nrec->ir_holemask);

		error = xfs_inobt_rec_check_count(mp, nrec);
		if (error)
			goto error;
	}

	error = xfs_inobt_update(cur, nrec);
	if (error)
		goto error;

out:
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;
error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Returns 0 if inodes were allocated in this AG; 1 if there was no space
 * in this AG; or the usual negative error code.
 */
STATIC int
xfs_ialloc_ag_alloc(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agi		*agi;
	struct xfs_alloc_arg	args;
	int			error;
	xfs_agino_t		newino;		/* new first inode's number */
	xfs_agino_t		newlen;		/* new number of inodes */
	int			isaligned = 0;	/* inode allocation at stripe */
						/* unit boundary */
	struct xfs_inobt_rec_incore rec;
	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
	uint16_t		allocmask = (uint16_t) -1; /* init. to full chunk */
	int			do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;
	args.oinfo = XFS_RMAP_OINFO_INODES;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks)
		do_sparse = prandom_u32() & 1;
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = igeo->ialloc_inos;
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							igeo->maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = igeo->ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes. If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = agbp->b_addr;
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
		     igeo->ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist. Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = igeo->cluster_align - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = igeo->inobt_maxlevels;
		if ((error = xfs_alloc_vextent(&args)))
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (igeo->ialloc_align) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = igeo->cluster_align;
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = igeo->inobt_maxlevels;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
		args.alignment = igeo->cluster_align;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * Finally, try a sparse allocation if the filesystem supports it and
	 * the sparse allocation length is smaller than a full chunk.
	 */
	if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks &&
	    args.fsbno == NULLFSBLOCK) {
sparse_alloc:
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
		args.alignment = args.mp->m_sb.sb_spino_align;
		args.prod = 1;

		args.minlen = igeo->ialloc_min_blks;
		args.maxlen = args.minlen;

		/*
		 * The inode record will be aligned to full chunk size. We must
		 * prevent sparse allocation from AG boundaries that result in
		 * invalid inode records, such as records that start at agbno 0
		 * or extend beyond the AG.
		 *
		 * Set min agbno to the first aligned, non-zero agbno and max to
		 * the last aligned agbno that is at least one full chunk from
		 * the end of the AG.
		 */
		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
					    args.mp->m_sb.sb_inoalignmt) -
				 igeo->ialloc_blks;

		error = xfs_alloc_vextent(&args);
		if (error)
			return error;

		newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
	}

	if (args.fsbno == NULLFSBLOCK)
		return 1;

	ASSERT(args.len == args.minlen);

	/*
	 * Stamp and write the inode buffers.
	 *
	 * Seed the new inode cluster with a random generation number. This
	 * prevents short-term reuse of generation numbers if a chunk is
	 * freed and then immediately reallocated. We use random numbers
	 * rather than a linear progression to prevent the next generation
	 * number from being easily guessable.
	 */
	error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
			args.agbno, args.len, prandom_u32());
	if (error)
		return error;
	/*
	 * Convert the results.
	 */
	newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);

	if (xfs_inobt_issparse(~allocmask)) {
		/*
		 * We've allocated a sparse chunk. Align the startino and mask.
		 */
		xfs_align_sparse_ino(args.mp, &newino, &allocmask);

		rec.ir_startino = newino;
		rec.ir_holemask = ~allocmask;
		rec.ir_count = newlen;
		rec.ir_freecount = newlen;
		rec.ir_free = XFS_INOBT_ALL_FREE;

		/*
		 * Insert the sparse record into the inobt and allow for a merge
		 * if necessary. If a merge does occur, rec is updated to the
		 * merged record.
		 */
		error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
				XFS_BTNUM_INO, &rec, true);
		if (error == -EFSCORRUPTED) {
			xfs_alert(args.mp,
	"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
				  XFS_AGINO_TO_INO(args.mp, pag->pag_agno,
						   rec.ir_startino),
				  rec.ir_holemask, rec.ir_count);
			xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
		}
		if (error)
			return error;

		/*
		 * We can't merge the part we've just allocated as for the inobt
		 * due to finobt semantics. The original record may or may not
		 * exist independent of whether physical inodes exist in this
		 * sparse chunk.
		 *
		 * We must update the finobt record based on the inobt record.
		 * rec contains the fully merged and up to date inobt record
		 * from the previous call. Set merge false to replace any
		 * existing record with this one.
		 */
		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert_sprec(args.mp, tp, agbp, pag,
				       XFS_BTNUM_FINO, &rec, false);
			if (error)
				return error;
		}
	} else {
		/* full chunk - insert new records to both btrees */
		error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino, newlen,
					 XFS_BTNUM_INO);
		if (error)
			return error;

		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert(args.mp, tp, agbp, pag, newino,
						 newlen, XFS_BTNUM_FINO);
			if (error)
				return error;
		}
	}

	/*
	 * Update AGI counts and newino.
	 */
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
	pag->pagi_freecount += newlen;
	pag->pagi_count += newlen;
	agi->agi_newino = cpu_to_be32(newino);

	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	return 0;
}
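/*
 * Worked example of the sparse allocation limits above (hypothetical
 * geometry): with sb_inoalignmt == 16, ialloc_blks == 16 and
 * sb_agblocks == 1000, the allocator is constrained to
 *
 *	args.min_agbno = 16;				(skip agbno 0)
 *	args.max_agbno = round_down(1000, 16) - 16 == 976;
 *
 * so any sparse extent lands inside [16, 976] and the chunk-aligned record
 * it maps to can never start at agbno 0 or run off the end of the AG.
 */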
STATIC xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor >= mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}
/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and the mode. Return the allocation group buffer.
 */
STATIC xfs_agnumber_t
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	umode_t		mode)		/* bits set to indicate file type */
{
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
	int		error;

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}

	ASSERT(pagno < agcount);

	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it. Note we don't look for free inodes, exactly.
	 * Instead, we also account for the blocks we would need to
	 * allocate a new inode chunk if none are currently free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	for (;;) {
		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto nextag;
		}

		if (!pag->pagi_init) {
			error = xfs_ialloc_pagi_init(mp, tp, agno);
			if (error)
				goto nextag;
		}

		if (pag->pagi_freecount) {
			xfs_perag_put(pag);
			return agno;
		}

		if (!pag->pagf_init) {
			error = xfs_alloc_pagf_init(mp, tp, agno, flags);
			if (error)
				goto nextag;
		}

		/*
		 * Check that there is enough free space for the file plus a
		 * chunk of inodes if we need to allocate some. If this is the
		 * first pass across the AGs, take into account the potential
		 * space needed for alignment of inode chunks when checking the
		 * longest contiguous free space in the AG - this prevents us
		 * from getting ENOSPC because we have free space larger than
		 * ialloc_blks but alignment constraints prevent us from using
		 * it.
		 *
		 * If we can't find an AG with space for full alignment slack to
		 * be taken into account, we must be near ENOSPC in all AGs.
		 * Hence we don't include alignment for the second pass and so
		 * if we fail allocation due to alignment issues then it is most
		 * likely a real ENOSPC condition.
		 */
		ineed = M_IGEO(mp)->ialloc_min_blks;
		if (flags && ineed > 1)
			ineed += M_IGEO(mp)->cluster_align;
		longest = pag->pagf_longest;
		if (!longest)
			longest = pag->pagf_flcount > 0;

		if (pag->pagf_freeblks >= needspace + ineed &&
		    longest >= ineed) {
			xfs_perag_put(pag);
			return agno;
		}
nextag:
		xfs_perag_put(pag);
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp))
			return NULLAGNUMBER;
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0)
				return NULLAGNUMBER;
			flags = 0;
		}
	}
}
/*
 * Try to retrieve the next record to the left/right from the current one.
 */
STATIC int
xfs_ialloc_next_rec(
	struct xfs_btree_cur	*cur,
	xfs_inobt_rec_incore_t	*rec,
	int			*done,
	int			left)
{
	int			error;
	int			i;

	if (left)
		error = xfs_btree_decrement(cur, 0, &i);
	else
		error = xfs_btree_increment(cur, 0, &i);
	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
			return -EFSCORRUPTED;
	}

	return 0;
}

STATIC int
xfs_ialloc_get_rec(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		agino,
	xfs_inobt_rec_incore_t	*rec,
	int			*done)
{
	int			error;
	int			i;

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
			return -EFSCORRUPTED;
	}

	return 0;
}
/*
 * Return the offset of the first free inode in the record. If the inode chunk
 * is sparsely allocated, we convert the record holemask to inode granularity
 * and mask off the unallocated regions from the inode free mask.
 */
STATIC int
xfs_inobt_first_free_inode(
	struct xfs_inobt_rec_incore	*rec)
{
	xfs_inofree_t			realfree;

	/* if there are no holes, return the first available offset */
	if (!xfs_inobt_issparse(rec->ir_holemask))
		return xfs_lowbit64(rec->ir_free);

	realfree = xfs_inobt_irec_to_allocmask(rec);
	realfree &= rec->ir_free;

	return xfs_lowbit64(realfree);
}
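/*
 * Worked example (a sketch): for a sparse record with ir_holemask 0x000f,
 * inodes 0-15 of the chunk are unallocated, so the allocation bitmap is
 * 0xffffffffffff0000. Even though ir_free may have bit 0 set (hole bits
 * remain set in ir_free), masking gives
 *
 *	realfree = 0xffffffffffff0000 & rec->ir_free;
 *
 * and xfs_lowbit64(realfree) correctly returns offset 16 rather than 0.
 */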
/*
 * Allocate an inode using the inobt-only algorithm.
 */
STATIC int
xfs_dialloc_ag_inobt(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi = agbp->b_addr;
	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_btree_cur	*cur, *tcur;
	struct xfs_inobt_rec_incore rec, trec;
	xfs_ino_t		ino;
	int			error;
	int			offset;
	int			i, j;
	int			searchdistance = 10;

	ASSERT(pag->pagi_init);
	ASSERT(pag->pagi_inodeok);
	ASSERT(pag->pagi_freecount > 0);

restart_pagno:
	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * If in the same AG as the parent, try to get near the parent.
	 */
	if (pagno == pag->pag_agno) {
		int		doneleft;	/* done, to the left */
		int		doneright;	/* done, to the right */

		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		error = xfs_inobt_get_rec(cur, &rec, &j);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, j != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		if (rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as the parent, done.
			 */
			goto alloc_inode;
		}

		/*
		 * In the same AG as parent, but parent's chunk is full.
		 */

		/* duplicate the cursor, search left & right simultaneously */
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;

		/*
		 * Skip to last blocks looked up if same parent inode.
		 */
		if (pagino != NULLAGINO &&
		    pag->pagl_pagino == pagino &&
		    pag->pagl_leftrec != NULLAGINO &&
		    pag->pagl_rightrec != NULLAGINO) {
			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
						   &trec, &doneleft);
			if (error)
				goto error1;

			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
						   &rec, &doneright);
			if (error)
				goto error1;
		} else {
			/* search left with tcur, back up 1 record */
			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
			if (error)
				goto error1;

			/* search right with cur, go forward 1 record. */
			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
			if (error)
				goto error1;
		}

		/*
		 * Loop until we find an inode chunk with a free inode.
		 */
		while (--searchdistance > 0 && (!doneleft || !doneright)) {
			int	useleft;  /* using left inode chunk this time */

			/* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}

			/* free inodes to the left? */
			if (useleft && trec.ir_freecount) {
				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
				cur = tcur;

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				rec = trec;
				goto alloc_inode;
			}

			/* free inodes to the right? */
			if (!useleft && rec.ir_freecount) {
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* get next record to check */
			if (useleft) {
				error = xfs_ialloc_next_rec(tcur, &trec,
							    &doneleft, 1);
			} else {
				error = xfs_ialloc_next_rec(cur, &rec,
							    &doneright, 0);
			}
			if (error)
				goto error1;
		}

		if (searchdistance <= 0) {
			/*
			 * Not in range - save last search
			 * location and allocate a new inode
			 */
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			pag->pagl_leftrec = trec.ir_startino;
			pag->pagl_rightrec = rec.ir_startino;
			pag->pagl_pagino = pagino;

		} else {
			/*
			 * We've reached the end of the btree. Because we
			 * are only searching a small chunk of the btree
			 * each search, there are obviously free inodes
			 * closer to the parent inode than we are now.
			 * Restart the search.
			 */
			pag->pagl_pagino = NULLAGINO;
			pag->pagl_leftrec = NULLAGINO;
			pag->pagl_rightrec = NULLAGINO;
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			goto restart_pagno;
		}
	}

	/*
	 * In a different AG from the parent.
	 * See if the most recently allocated block has any free.
	 */
	else if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			goto error0;

		if (i == 1) {
			error = xfs_inobt_get_rec(cur, &rec, &j);
			if (error)
				goto error0;

			if (j == 1 && rec.ir_freecount > 0) {
				/*
				 * The last chunk allocated in the group
				 * still has a free inode.
				 */
				goto alloc_inode;
			}
		}
	}

	/*
	 * None left in the last group, search the whole AG
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}

	for (;;) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (rec.ir_freecount > 0)
			break;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
	}

alloc_inode:
	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	error = xfs_inobt_update(cur, &rec);
	if (error)
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Use the free inode btree to allocate an inode based on distance from the
 * parent. Note that the provided cursor may be deleted and replaced.
 */
STATIC int
xfs_dialloc_ag_finobt_near(
	xfs_agino_t			pagino,
	struct xfs_btree_cur		**ocur,
	struct xfs_inobt_rec_incore	*rec)
{
	struct xfs_btree_cur		*lcur = *ocur;	/* left search cursor */
	struct xfs_btree_cur		*rcur;	/* right search cursor */
	struct xfs_inobt_rec_incore	rrec;
	int				error;
	int				i, j;

	error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
	if (error)
		return error;

	if (i == 1) {
		error = xfs_inobt_get_rec(lcur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
			return -EFSCORRUPTED;

		/*
		 * See if we've landed in the parent inode record. The finobt
		 * only tracks chunks with at least one free inode, so record
		 * existence is enough.
		 */
		if (pagino >= rec->ir_startino &&
		    pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
			return 0;
	}

	error = xfs_btree_dup_cursor(lcur, &rcur);
	if (error)
		return error;

	error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
	if (error)
		goto error_rcur;
	if (j == 1) {
		error = xfs_inobt_get_rec(rcur, &rrec, &j);
		if (error)
			goto error_rcur;
		if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
			error = -EFSCORRUPTED;
			goto error_rcur;
		}
	}

	if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
		error = -EFSCORRUPTED;
		goto error_rcur;
	}
	if (i == 1 && j == 1) {
		/*
		 * Both the left and right records are valid. Choose the closer
		 * inode chunk to the target.
		 */
		if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
		    (rrec.ir_startino - pagino)) {
			*rec = rrec;
			xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
			*ocur = rcur;
		} else {
			xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
		}
	} else if (j == 1) {
		/* only the right record is valid */
		*rec = rrec;
		xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
		*ocur = rcur;
	} else if (i == 1) {
		/* only the left record is valid */
		xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
	}

	return 0;

error_rcur:
	xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Use the free inode btree to find a free inode based on a newino hint. If
 * the hint is NULLAGINO, find the first free inode in the AG.
 */
STATIC int
xfs_dialloc_ag_finobt_newino(
	struct xfs_agi			*agi,
	struct xfs_btree_cur		*cur,
	struct xfs_inobt_rec_incore	*rec)
{
	int error;
	int i;

	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			return error;
		if (i == 1) {
			error = xfs_inobt_get_rec(cur, rec, &i);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
				return -EFSCORRUPTED;
			return 0;
		}
	}

	/*
	 * Find the first inode available in the AG.
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_inobt_get_rec(cur, rec, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	return 0;
}
/*
 * Update the inobt based on a modification made to the finobt. Also ensure that
 * the records from both trees are equivalent post-modification.
 */
STATIC int
xfs_dialloc_ag_update_inobt(
	struct xfs_btree_cur		*cur,	/* inobt cursor */
	struct xfs_inobt_rec_incore	*frec,	/* finobt record */
	int				offset) /* inode offset */
{
	struct xfs_inobt_rec_incore	rec;
	int				error;
	int				i;

	error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;
	ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);

	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;

	if (XFS_IS_CORRUPT(cur->bc_mp,
			   rec.ir_free != frec->ir_free ||
			   rec.ir_freecount != frec->ir_freecount))
		return -EFSCORRUPTED;

	return xfs_inobt_update(cur, &rec);
}
/*
 * Allocate an inode using the free inode btree, if available. Otherwise, fall
 * back to the inobt search algorithm.
 *
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
static int
xfs_dialloc_ag(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_agi			*agi = agbp->b_addr;
	xfs_agnumber_t			pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t			pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_btree_cur		*cur;	/* finobt cursor */
	struct xfs_btree_cur		*icur;	/* inobt cursor */
	struct xfs_inobt_rec_incore	rec;
	xfs_ino_t			ino;
	int				error;
	int				offset;
	int				i;
	struct xfs_perag		*pag = agbp->b_pag;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return xfs_dialloc_ag_inobt(tp, agbp, pag, parent, inop);

	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_cur;

	/*
	 * The search algorithm depends on whether we're in the same AG as the
	 * parent. If so, find the closest available inode to the parent. If
	 * not, consider the agi hint or find the first free inode in the AG.
	 */
	if (pag->pag_agno == pagno)
		error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
	else
		error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
	if (error)
		goto error_cur;

	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);

	/*
	 * Modify or remove the finobt record.
	 */
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	if (rec.ir_freecount)
		error = xfs_inobt_update(cur, &rec);
	else
		error = xfs_btree_delete(cur, &i);
	if (error)
		goto error_cur;

	/*
	 * The finobt has now been updated appropriately. We haven't updated the
	 * agi and superblock yet, so we can create an inobt cursor and validate
	 * the original freecount. If all is well, make the equivalent update to
	 * the inobt using the finobt record and offset information.
	 */
	icur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;

	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
	if (error)
		goto error_icur;

	/*
	 * Both trees have now been updated. We must update the perag and
	 * superblock before we can check the freecount for each btree.
	 */
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_icur;

	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	*inop = ino;
	return 0;

error_icur:
	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
error_cur:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
static int
xfs_dialloc_roll(
	struct xfs_trans	**tpp,
	struct xfs_buf		*agibp)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_dquot_acct	*dqinfo;
	int			error;

	/*
	 * Hold on to the agibp across the commit so no other allocation can
	 * come in and take the free inodes we just allocated for our caller.
	 */
	xfs_trans_bhold(tp, agibp);

	/*
	 * We want the quota changes to be associated with the next transaction,
	 * NOT this one. So, detach the dqinfo from this and attach it to the
	 * next transaction.
	 */
	dqinfo = tp->t_dqinfo;
	tp->t_dqinfo = NULL;

	error = xfs_trans_roll(&tp);

	/* Re-attach the quota info that we detached from prev trx. */
	tp->t_dqinfo = dqinfo;

	*tpp = tp;
	if (error)
		return error;
	xfs_trans_bjoin(tp, agibp);
	return 0;
}
/*
 * Select and prepare an AG for inode allocation.
 *
 * Mode is used to tell whether the new inode is a directory and hence where to
 * locate it.
 *
 * This function will ensure that the selected AG has free inodes available to
 * allocate from. The selected AGI will be returned locked to the caller, and it
 * will allocate more free inodes if required. If no free inodes are found or
 * can be allocated, no AGI will be returned.
 */
int
xfs_dialloc_select_ag(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	struct xfs_buf		**IO_agbp)
{
	struct xfs_mount	*mp = (*tpp)->t_mountp;
	struct xfs_buf		*agbp;
	xfs_agnumber_t		agno;
	int			error;
	bool			noroom = false;
	xfs_agnumber_t		start_agno;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	bool			okalloc = true;

	*IO_agbp = NULL;

	/*
	 * We do not have an agbp, so select an initial allocation
	 * group for inode allocation.
	 */
	start_agno = xfs_ialloc_ag_select(*tpp, parent, mode);
	if (start_agno == NULLAGNUMBER)
		return 0;

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 *
	 * Read a rough value of mp->m_icount with
	 * percpu_counter_read_positive, which sacrifices precision for
	 * performance.
	 */
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
							> igeo->maxicount) {
		noroom = true;
		okalloc = false;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes. Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	agno = start_agno;
	for (;;) {
		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto nextag;
		}

		if (!pag->pagi_init) {
			error = xfs_ialloc_pagi_init(mp, *tpp, agno);
			if (error)
				break;
		}

		/*
		 * Do a first racy fast path check if this AG is usable.
		 */
		if (!pag->pagi_freecount && !okalloc)
			goto nextag;

		/*
		 * Then read in the AGI buffer and recheck with the AGI buffer
		 * lock held.
		 */
		error = xfs_ialloc_read_agi(mp, *tpp, agno, &agbp);
		if (error)
			break;

		if (pag->pagi_freecount) {
			xfs_perag_put(pag);
			goto found_ag;
		}

		if (!okalloc)
			goto nextag_relse_buffer;

		error = xfs_ialloc_ag_alloc(*tpp, agbp, pag);
		if (error < 0) {
			xfs_trans_brelse(*tpp, agbp);

			if (error == -ENOSPC)
				error = 0;
			break;
		}

		if (error == 0) {
			/*
			 * We successfully allocated space for an inode cluster
			 * in this AG. Roll the transaction so that we can
			 * allocate one of the new inodes.
			 */
			ASSERT(pag->pagi_freecount > 0);
			xfs_perag_put(pag);

			error = xfs_dialloc_roll(tpp, agbp);
			if (error) {
				xfs_buf_relse(agbp);
				return error;
			}
			goto found_ag;
		}

nextag_relse_buffer:
		xfs_trans_brelse(*tpp, agbp);
nextag:
		xfs_perag_put(pag);
		if (++agno == mp->m_sb.sb_agcount)
			agno = 0;
		if (agno == start_agno)
			return noroom ? -ENOSPC : 0;
	}

	xfs_perag_put(pag);
	return error;
found_ag:
	*IO_agbp = agbp;
	return 0;
}
/*
 * Free the blocks of an inode chunk. We must consider that the inode chunk
 * might be sparse and only free the regions that are allocated as part of the
 * chunk.
 */
STATIC void
xfs_difree_inode_chunk(
	struct xfs_trans		*tp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*rec)
{
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agblock_t			sagbno = XFS_AGINO_TO_AGBNO(mp,
							rec->ir_startino);
	int				startidx, endidx;
	int				nextbit;
	xfs_agblock_t			agbno;
	int				contigblk;
	DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);

	if (!xfs_inobt_issparse(rec->ir_holemask)) {
		/* not sparse, calculate extent info directly */
		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
				  M_IGEO(mp)->ialloc_blks,
				  &XFS_RMAP_OINFO_INODES);
		return;
	}

	/* holemask is only 16-bits (fits in an unsigned long) */
	ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
	holemask[0] = rec->ir_holemask;

	/*
	 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
	 * holemask and convert the start/end index of each range to an extent.
	 * We start with the start and end index both pointing at the first 0 in
	 * the mask.
	 */
	startidx = endidx = find_first_zero_bit(holemask,
						XFS_INOBT_HOLEMASK_BITS);
	nextbit = startidx + 1;
	while (startidx < XFS_INOBT_HOLEMASK_BITS) {
		nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
					     nextbit);
		/*
		 * If the next zero bit is contiguous, update the end index of
		 * the current range and continue.
		 */
		if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
		    nextbit == endidx + 1) {
			endidx = nextbit;
			goto next;
		}

		/*
		 * nextbit is not contiguous with the current end index. Convert
		 * the current start/end to an extent and add it to the free
		 * list.
		 */
		agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
				  mp->m_sb.sb_inopblock;
		contigblk = ((endidx - startidx + 1) *
			     XFS_INODES_PER_HOLEMASK_BIT) /
			    mp->m_sb.sb_inopblock;

		ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
		ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
				  contigblk, &XFS_RMAP_OINFO_INODES);

		/* reset range to current bit and carry on... */
		startidx = endidx = nextbit;

next:
		nextbit++;
	}
}
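/*
 * Worked example of the loop above (hypothetical geometry): with 8 inodes
 * per block (sb_inopblock == 8) and ir_holemask 0x0f0f, holemask bits 0-3
 * and 8-11 are holes, so the zero (allocated) ranges are bits 4-7 and
 * 12-15. Each range covers 4 * XFS_INODES_PER_HOLEMASK_BIT == 16 inodes
 * == 2 blocks, yielding two freed extents:
 *
 *	agbno = sagbno + (4 * 4) / 8 == sagbno + 2,  contigblk = 2
 *	agbno = sagbno + (12 * 4) / 8 == sagbno + 6, contigblk = 2
 */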
STATIC int
xfs_difree_inobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	struct xfs_perag		*pag,
	xfs_agino_t			agino,
	struct xfs_icluster		*xic,
	struct xfs_inobt_rec_incore	*orec)
{
	struct xfs_agi			*agi = agbp->b_addr;
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				ilen;
	int				error;
	int				i;
	int				off;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));

	/*
	 * Initialize the cursor.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * Look for the entry describing this inode.
	 */
	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error) {
		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
	/*
	 * Mark the inode free & increment the count.
	 */
	rec.ir_free |= XFS_INOBT_MASK(off);
	rec.ir_freecount++;

	/*
	 * When an inode chunk is free, it becomes eligible for removal. Don't
	 * remove the chunk if the block size is large enough for multiple inode
	 * chunks (that might not be free).
	 */
	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
	    rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
		xic->deleted = true;
		xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
				rec.ir_startino);
		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 */
		ilen = rec.ir_freecount;
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		pag->pagi_freecount -= ilen - 1;
		pag->pagi_count -= ilen;
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		if ((error = xfs_btree_delete(cur, &i))) {
			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
				__func__, error);
			goto error0;
		}

		xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
	} else {
		xic->deleted = false;

		error = xfs_inobt_update(cur, &rec);
		if (error) {
			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
				__func__, error);
			goto error0;
		}

		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		pag->pagi_freecount++;
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	*orec = rec;
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Free an inode in the free inode btree.
 */
STATIC int
xfs_difree_finobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	struct xfs_perag		*pag,
	xfs_agino_t			agino,
	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
{
	struct xfs_agi			*agi = agbp->b_addr;
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				offset = agino - ibtrec->ir_startino;
	int				error;
	int				i;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_FINO);

	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	if (i == 0) {
		/*
		 * If the record does not exist in the finobt, we must have just
		 * freed an inode in a previously fully allocated chunk. If not,
		 * something is out of sync.
		 */
		if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
					     ibtrec->ir_count,
					     ibtrec->ir_freecount,
					     ibtrec->ir_free, &i);
		if (error)
			goto error;
		ASSERT(i == 1);

		goto out;
	}

	/*
	 * Read and update the existing record. We could just copy the ibtrec
	 * across here, but that would defeat the purpose of having redundant
	 * metadata. By making the modifications independently, we can catch
	 * corruptions that we wouldn't see if we just copied from one record
	 * to another.
	 */
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		goto error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	rec.ir_free |= XFS_INOBT_MASK(offset);
	rec.ir_freecount++;

	if (XFS_IS_CORRUPT(mp,
			   rec.ir_free != ibtrec->ir_free ||
			   rec.ir_freecount != ibtrec->ir_freecount)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	/*
	 * The content of inobt records should always match between the inobt
	 * and finobt. The lifecycle of records in the finobt is different from
	 * the inobt in that the finobt only tracks records with at least one
	 * free inode. Hence, if all of the inodes are free and we aren't
	 * keeping inode chunks permanently on disk, remove the record.
	 * Otherwise, update the record with the new information.
	 *
	 * Note that we currently can't free chunks when the block size is large
	 * enough for multiple chunks. Leave the finobt record to remain in sync
	 * with the inobt.
	 */
	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto error;
		ASSERT(i == 1);
	} else {
		error = xfs_inobt_update(cur, &rec);
		if (error)
			goto error;
	}

out:
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Free disk inode. Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_ino_t		inode,		/* inode to be freed */
	struct xfs_icluster	*xic)		/* cluster info if deleted */
{
	/* REFERENCED */
	xfs_agblock_t		agbno;	/* block number containing inode */
	struct xfs_buf		*agbp;	/* buffer for allocation group header */
	xfs_agino_t		agino;	/* allocation group inode number */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			error;	/* error return value */
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inobt_rec_incore rec;/* btree record */
	struct xfs_perag	*pag;

	/*
	 * Break up inode number into its components.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
			__func__, agno, mp->m_sb.sb_agcount);
		ASSERT(0);
		return -EINVAL;
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
			__func__, (unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
		ASSERT(0);
		return -EINVAL;
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks)  {
		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
			__func__, agbno, mp->m_sb.sb_agblocks);
		ASSERT(0);
		return -EINVAL;
	}
	/*
	 * Get the allocation group header.
	 */
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
			__func__, error);
		return error;
	}

	/*
	 * Fix up the inode allocation btree.
	 */
	pag = agbp->b_pag;
	error = xfs_difree_inobt(mp, tp, agbp, pag, agino, xic, &rec);
	if (error)
		goto error0;

	/*
	 * Fix up the free inode btree.
	 */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		error = xfs_difree_finobt(mp, tp, agbp, pag, agino, &rec);
		if (error)
			goto error0;
	}

	return 0;

error0:
	return error;
}
STATIC int
xfs_imap_lookup(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	xfs_agblock_t		agbno,
	xfs_agblock_t		*chunk_agbno,
	xfs_agblock_t		*offset_agbno,
	int			flags)
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
	if (error) {
		xfs_alert(mp,
			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
			__func__, error, pag->pag_agno);
		return error;
	}

	/*
	 * Lookup the inode record for the given agino. If the record cannot be
	 * found, then it's an invalid inode number and we should abort. Once
	 * we have a record, we need to ensure it contains the inode number
	 * we are looking up.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (!error) {
		if (i)
			error = xfs_inobt_get_rec(cur, &rec, &i);
		if (!error && i == 0)
			error = -EINVAL;
	}

	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* check that the returned record contains the required inode */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
		return -EINVAL;

	/* for untrusted inodes check it is allocated first */
	if ((flags & XFS_IGET_UNTRUSTED) &&
	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
		return -EINVAL;

	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
	*offset_agbno = agbno - *chunk_agbno;
	return 0;
}
2308 * Return the location of the inode in imap, for mapping it into a buffer.
2312 struct xfs_mount *mp, /* file system mount structure */
2313 struct xfs_trans *tp, /* transaction pointer */
2314 xfs_ino_t ino, /* inode to locate */
2315 struct xfs_imap *imap, /* location map structure */
2316 uint flags) /* flags for inode btree lookup */
2318 xfs_agblock_t agbno; /* block number of inode in the alloc group */
2319 xfs_agino_t agino; /* inode number within alloc group */
2320 xfs_agblock_t chunk_agbno; /* first block in inode chunk */
2321 xfs_agblock_t cluster_agbno; /* first block in inode cluster */
2322 int error; /* error code */
2323 int offset; /* index of inode in its buffer */
2324 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */
2325 struct xfs_perag *pag;
2327 ASSERT(ino != NULLFSINO);
2330 * Split up the inode number into its parts.
2332 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
2333 agino = XFS_INO_TO_AGINO(mp, ino);
2334 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
2335 if (!pag || agbno >= mp->m_sb.sb_agblocks ||
2336 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2340 * Don't output diagnostic information for untrusted inodes
2341 * as they can be invalid without implying corruption.
2343 if (flags & XFS_IGET_UNTRUSTED)
2347 "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
2348 __func__, XFS_INO_TO_AGNO(mp, ino),
2349 mp->m_sb.sb_agcount);
2351 if (agbno >= mp->m_sb.sb_agblocks) {
2353 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
2354 __func__, (unsigned long long)agbno,
2355 (unsigned long)mp->m_sb.sb_agblocks);
2357 if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) {
2359 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
2361 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino));
2369 * For bulkstat and handle lookups, we have an untrusted inode number
2370 * that we have to verify is valid. We cannot do this just by reading
2371 * the inode buffer as it may have been unlinked and removed leaving
2372 * inodes in stale state on disk. Hence we have to do a btree lookup
2373 * in all cases where an untrusted inode number is passed.
2375 if (flags & XFS_IGET_UNTRUSTED) {
2376 error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
2377 &chunk_agbno, &offset_agbno, flags);
2384 * If the inode cluster size is the same as the blocksize or
2385 * smaller we get to the buffer by simple arithmetics.
2387 if (M_IGEO(mp)->blocks_per_cluster == 1) {
2388 offset = XFS_INO_TO_OFFSET(mp, ino);
2389 ASSERT(offset < mp->m_sb.sb_inopblock);
2391 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno);
2392 imap->im_len = XFS_FSB_TO_BB(mp, 1);
2393 imap->im_boffset = (unsigned short)(offset <<
2394 mp->m_sb.sb_inodelog);
	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location. Otherwise we have to do a btree
	 * lookup to find the location.
	 */
	if (M_IGEO(mp)->inoalign_mask) {
		offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		error = xfs_imap_lookup(mp, tp, pag, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			goto out_drop;
	}

out_map:
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
		 M_IGEO(mp)->blocks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return an error rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_alert(mp,
	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
			__func__, (unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		error = -EINVAL;
		goto out_drop;
	}
	error = 0;
out_drop:
	xfs_perag_put(pag);
	return error;
}
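/*
 * Usage sketch (hypothetical helper, not part of XFS): map an inode number
 * to its cluster buffer and read it. xfs_imap() fills @imap with the disk
 * address (im_blkno), length (im_len) and byte offset (im_boffset) of the
 * inode within its cluster buffer; the buffer is then read with the inode
 * buffer verifier attached.
 */
static inline int
xfs_imap_read_example(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_buf		**bpp)
{
	struct xfs_imap		imap;
	int			error;

	error = xfs_imap(mp, tp, ino, &imap, 0);
	if (error)
		return error;
	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
			imap.im_len, 0, bpp, &xfs_inode_buf_ops);
}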
/*
 * Log specified fields for the ag hdr (inode section). The growth of the agi
 * structure over time requires that we interpret the buffer as two logical
 * regions delineated by the end of the unlinked list. This is due to the size
 * of the hash table and its location in the middle of the agi.
 *
 * For example, a request to log a field before agi_unlinked and a field after
 * agi_unlinked could cause us to log the entire hash table and use an
 * excessive amount of log space. To avoid this behavior, log the region up
 * through agi_unlinked in one call and the region after agi_unlinked through
 * the end of the structure in another.
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	struct xfs_buf	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
					/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		offsetof(xfs_agi_t, agi_free_root),
		offsetof(xfs_agi_t, agi_free_level),
		offsetof(xfs_agi_t, agi_iblocks),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	struct xfs_agi		*agi = bp->b_addr;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif
	/*
	 * Compute byte offsets for the first and last fields in the first
	 * region and log the agi buffer. This only logs up through
	 * agi_unlinked.
	 */
	if (fields & XFS_AGI_ALL_BITS_R1) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}

	/*
	 * Mask off the bits in the first region and calculate the first and
	 * last field offsets for any bits in the second region.
	 */
	fields &= ~XFS_AGI_ALL_BITS_R1;
	if (fields) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}
}
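/*
 * Usage sketch (hypothetical, for illustration only): logging fields on both
 * sides of agi_unlinked, e.g. agi_freecount (region 1) and agi_free_root
 * (region 2), still issues two separate xfs_trans_log_buf() ranges, so the
 * unlinked hash table between them is never logged wholesale.
 */
static inline void
xfs_ialloc_log_agi_example(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT | XFS_AGI_FREE_ROOT);
}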
static xfs_failaddr_t
xfs_agi_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agi	*agi = bp->b_addr;
	int		i;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
			return __this_address;
	}

	/*
	 * Validate the magic number of the agi block.
	 */
	if (!xfs_verify_magic(bp, agi->agi_magicnum))
		return __this_address;
	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
		return __this_address;

	if (be32_to_cpu(agi->agi_level) < 1 ||
	    be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    (be32_to_cpu(agi->agi_free_level) < 1 ||
	     be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
		return __this_address;

	/*
	 * During growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
			continue;
		if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
			return __this_address;
	}

	return NULL;
}
static void
xfs_agi_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agi_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}
static void
xfs_agi_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_agi		*agi = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_agi_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
}
const struct xfs_buf_ops xfs_agi_buf_ops = {
	.name = "xfs_agi",
	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
	.verify_read = xfs_agi_read_verify,
	.verify_write = xfs_agi_write_verify,
	.verify_struct = xfs_agi_verify,
};
/*
 * Read in the allocation group header (inode allocation section).
 */
int
xfs_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	int			error;

	trace_xfs_read_agi(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
	if (error)
		return error;
	if (tp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);

	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
	return 0;
}
int
xfs_ialloc_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	trace_xfs_ialloc_read_agi(mp, agno);

	error = xfs_read_agi(mp, tp, agno, bpp);
	if (error)
		return error;

	agi = (*bpp)->b_addr;
	pag = (*bpp)->b_pag;
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	}

	/*
	 * It's possible for these to be out of sync if
	 * we are in the middle of a forced shutdown.
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
	return 0;
}
/*
 * Read in the agi to initialise the per-ag data in the mount structure.
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno)		/* allocation group number */
{
	struct xfs_buf	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}
/* Is there an inode record covering a given range of inode numbers? */
int
xfs_ialloc_has_inode_record(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		low,
	xfs_agino_t		high,
	bool			*exists)
{
	struct xfs_inobt_rec_incore	irec;
	xfs_agino_t		agino;
	uint16_t		holemask;
	int			has_record;
	int			i;
	int			error;

	*exists = false;
	error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
	while (error == 0 && has_record) {
		error = xfs_inobt_get_rec(cur, &irec, &has_record);
		if (error || irec.ir_startino > high)
			break;

		agino = irec.ir_startino;
		holemask = irec.ir_holemask;
		for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
				i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
			if (holemask & 1)
				continue;
			if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
					agino <= high) {
				*exists = true;
				return 0;
			}
		}

		error = xfs_btree_increment(cur, 0, &has_record);
	}
	return error;
}
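/*
 * Worked example (illustrative only): with XFS_INODES_PER_CHUNK = 64 and
 * XFS_INOBT_HOLEMASK_BITS = 16, each holemask bit covers
 * XFS_INODES_PER_HOLEMASK_BIT = 4 inodes. For a record at ir_startino 256
 * with holemask bit 0 set, aginos 256-259 are a hole; the loop above skips
 * them and only reports an overlap with [low, high] for holemask bits that
 * are clear, i.e. inodes that are physically present.
 */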
/* Is there an inode record covering a given extent? */
int
xfs_ialloc_has_inodes_at_extent(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	xfs_agino_t		low;
	xfs_agino_t		high;

	low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
	high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;

	return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
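/*
 * Worked example (illustrative only): with 4096-byte blocks and 512-byte
 * inodes, sb_inopblog = 3, so XFS_AGB_TO_AGINO() is a shift left by 3. An
 * extent at bno 16 with len 2 maps to the agino range [128, 143]:
 * 16 << 3 = 128 and (18 << 3) - 1 = 143.
 */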
struct xfs_ialloc_count_inodes {
	xfs_agino_t			count;
	xfs_agino_t			freecount;
};

/* Record inode counts across all inobt records. */
STATIC int
xfs_ialloc_count_inodes_rec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_inobt_rec_incore	irec;
	struct xfs_ialloc_count_inodes	*ci = priv;

	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
	ci->count += irec.ir_count;
	ci->freecount += irec.ir_freecount;

	return 0;
}

/* Count allocated and free inodes under an inobt. */
int
xfs_ialloc_count_inodes(
	struct xfs_btree_cur		*cur,
	xfs_agino_t			*count,
	xfs_agino_t			*freecount)
{
	struct xfs_ialloc_count_inodes	ci = {0};
	int				error;

	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
	error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
	if (error)
		return error;

	*count = ci.count;
	*freecount = ci.freecount;
	return 0;
}
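/*
 * Usage sketch (hypothetical helper, not part of XFS): count the allocated
 * and free inodes of one AG given a locked AGI buffer. The cursor must be
 * torn down with the query error so the btree code can unwind correctly.
 */
static inline int
xfs_ialloc_count_ag_example(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_agino_t		*count,
	xfs_agino_t		*freecount)
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_INO);
	error = xfs_ialloc_count_inodes(cur, count, freecount);
	xfs_btree_del_cursor(cur, error);
	return error;
}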
/*
 * Initialize inode-related geometry information.
 *
 * Compute the inode btree min and max levels and set maxicount.
 *
 * Set the inode cluster size. This may still be overridden by the file
 * system block size if it is larger than the chosen cluster size.
 *
 * For v5 filesystems, scale the cluster size with the inode size to keep a
 * constant ratio of inodes per cluster buffer, but only if mkfs has set the
 * inode alignment value appropriately for larger cluster sizes.
 *
 * Then compute the inode cluster alignment information.
 */
void
xfs_ialloc_setup_geometry(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		icount;
	uint			inodes;

	igeo->new_diflags2 = 0;
	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;

	/* Compute inode btree geometry. */
	igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
	igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;

	igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
			sbp->sb_inopblock);
	igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;

	if (sbp->sb_spino_align)
		igeo->ialloc_min_blks = sbp->sb_spino_align;
	else
		igeo->ialloc_min_blks = igeo->ialloc_blks;

	/* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
	igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
			inodes);

	/*
	 * Set the maximum inode count for this filesystem, being careful not
	 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
	 * users should never get here due to failing sb verification, but
	 * certain users (xfs_db) need to be usable even with corrupt metadata.
	 */
	if (sbp->sb_imax_pct && igeo->ialloc_blks) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, igeo->ialloc_blks);
		igeo->maxicount = XFS_FSB_TO_INO(mp,
				icount * igeo->ialloc_blks);
	} else {
		igeo->maxicount = 0;
	}
	/*
	 * Compute the desired size of an inode cluster buffer, which starts
	 * at 8K and (on v5 filesystems) scales up with larger inode sizes.
	 *
	 * Preserve the desired inode cluster size because the sparse inodes
	 * feature uses that desired size (not the actual size) to compute the
	 * sparse inode alignment. The mount code validates this value, so we
	 * cannot change the behavior.
	 */
	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		int	new_size = igeo->inode_cluster_size_raw;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			igeo->inode_cluster_size_raw = new_size;
	}

	/* Calculate inode cluster ratios. */
	if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
		igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
				igeo->inode_cluster_size_raw);
	else
		igeo->blocks_per_cluster = 1;
	igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
	igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);

	/* Calculate inode cluster alignment. */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
		igeo->cluster_align = mp->m_sb.sb_inoalignmt;
	else
		igeo->cluster_align = 1;
	igeo->inoalign_mask = igeo->cluster_align - 1;
	igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);

	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment.
	 */
	if (mp->m_dalign && igeo->inoalign_mask &&
	    !(mp->m_dalign & igeo->inoalign_mask))
		igeo->ialloc_align = mp->m_dalign;
	else
		igeo->ialloc_align = 0;
}
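/*
 * Worked example (illustrative only): a v5 filesystem with 4096-byte blocks
 * and 512-byte inodes scales the raw cluster size to
 * 8192 * (512 / 256) = 16384 bytes (assuming sb_inoalignmt permits it),
 * giving blocks_per_cluster = 16384 / 4096 = 4 and
 * inodes_per_cluster = 4 * 8 = 32.
 */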
/* Compute the location of the root directory inode that is laid out by mkfs. */
xfs_ino_t
xfs_ialloc_calc_rootino(
	struct xfs_mount	*mp,
	int			sunit)
{
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agblock_t		first_bno;

	/*
	 * Pre-calculate the geometry of AG 0. We know what it looks like
	 * because libxfs knows how to create allocation groups now.
	 *
	 * first_bno is the first block in which mkfs could possibly have
	 * allocated the root directory inode, once we factor in the metadata
	 * that mkfs formats before it. Namely, the four AG headers...
	 */
	first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);

	/* ...the two free space btree roots... */
	first_bno += 2;

	/* ...the inode btree root... */
	first_bno++;

	/* ...the initial AGFL... */
	first_bno += xfs_alloc_min_freelist(mp, NULL);

	/* ...the free inode btree root... */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		first_bno++;

	/* ...the reverse mapping btree root... */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		first_bno++;

	/* ...the reference count btree... */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		first_bno++;

	/*
	 * ...and the log, if it is allocated in the first allocation group.
	 *
	 * This can happen with filesystems that only have a single
	 * allocation group, or very odd geometries created by old mkfs
	 * versions on very small filesystems.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
		first_bno += mp->m_sb.sb_logblocks;

	/*
	 * Now round first_bno up to whatever allocation alignment is given
	 * by the filesystem or was passed in.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
		first_bno = roundup(first_bno, sunit);
	else if (xfs_sb_version_hasalign(&mp->m_sb) &&
			mp->m_sb.sb_inoalignmt > 1)
		first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);

	return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}