1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_icache.h"
38 #include "xfs_iomap.h"
40 struct kmem_cache *xfs_bmap_intent_cache;
43 * Miscellaneous helper functions
47 * Compute and fill in the value of the maximum depth of a bmap btree
48 * in this filesystem. Done once, during mount.
51 xfs_bmap_compute_maxlevels(
52 xfs_mount_t *mp, /* file system mount structure */
53 int whichfork) /* data or attr fork */
55 uint64_t maxblocks; /* max blocks at this level */
56 xfs_extnum_t maxleafents; /* max leaf entries possible */
57 int level; /* btree level */
58 int maxrootrecs; /* max records in root block */
59 int minleafrecs; /* min records in leaf block */
60 int minnoderecs; /* min records in node block */
61 int sz; /* root block size */
64 * The maximum number of extents in a fork, hence the maximum number of
65 * leaf entries, is controlled by the size of the on-disk extent count.
67 * Note that we can no longer assume that if we are in ATTR1 that the
68 * fork offset of all the inodes will be
69 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
70 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
71 * but probably at various positions. Therefore, for both ATTR1 and
72 * ATTR2 we have to assume the worst case scenario of a minimum size
75 maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
77 if (whichfork == XFS_DATA_FORK)
78 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
80 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
82 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
83 minleafrecs = mp->m_bmap_dmnr[0];
84 minnoderecs = mp->m_bmap_dmnr[1];
85 maxblocks = howmany_64(maxleafents, minleafrecs);
86 for (level = 1; maxblocks > 1; level++) {
87 if (maxblocks <= maxrootrecs)
90 maxblocks = howmany_64(maxblocks, minnoderecs);
92 mp->m_bm_maxlevels[whichfork] = level;
93 ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
97 xfs_bmap_compute_attr_offset(
100 if (mp->m_sb.sb_inodesize == 256)
101 return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
102 return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
105 STATIC int /* error */
107 struct xfs_btree_cur *cur,
108 struct xfs_bmbt_irec *irec,
109 int *stat) /* success/failure */
111 cur->bc_rec.b = *irec;
112 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
115 STATIC int /* error */
116 xfs_bmbt_lookup_first(
117 struct xfs_btree_cur *cur,
118 int *stat) /* success/failure */
120 cur->bc_rec.b.br_startoff = 0;
121 cur->bc_rec.b.br_startblock = 0;
122 cur->bc_rec.b.br_blockcount = 0;
123 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
127 * Check if the inode needs to be converted to btree format.
129 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
131 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
133 return whichfork != XFS_COW_FORK &&
134 ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
135 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
139 * Check if the inode should be converted to extent format.
141 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
143 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
145 return whichfork != XFS_COW_FORK &&
146 ifp->if_format == XFS_DINODE_FMT_BTREE &&
147 ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
151 * Update the record referred to by cur to the value given by irec
152 * This either works (return 0) or gets an EFSCORRUPTED error.
156 struct xfs_btree_cur *cur,
157 struct xfs_bmbt_irec *irec)
159 union xfs_btree_rec rec;
161 xfs_bmbt_disk_set_all(&rec.bmbt, irec);
162 return xfs_btree_update(cur, &rec);
166 * Compute the worst-case number of indirect blocks that will be used
167 * for ip's delayed extent of length "len".
170 xfs_bmap_worst_indlen(
171 xfs_inode_t *ip, /* incore inode pointer */
172 xfs_filblks_t len) /* delayed extent length */
174 int level; /* btree level number */
175 int maxrecs; /* maximum record count at this level */
176 xfs_mount_t *mp; /* mount structure */
177 xfs_filblks_t rval; /* return value */
180 maxrecs = mp->m_bmap_dmxr[0];
181 for (level = 0, rval = 0;
182 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
185 do_div(len, maxrecs);
188 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
191 maxrecs = mp->m_bmap_dmxr[1];
197 * Calculate the default attribute fork offset for newly created inodes.
200 xfs_default_attroffset(
201 struct xfs_inode *ip)
203 if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
204 return roundup(sizeof(xfs_dev_t), 8);
205 return M_IGEO(ip->i_mount)->attr_fork_offset;
209 * Helper routine to reset inode i_forkoff field when switching attribute fork
210 * from local to extent format - we reset it where possible to make space
211 * available for inline data fork extents.
214 xfs_bmap_forkoff_reset(
218 if (whichfork == XFS_ATTR_FORK &&
219 ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
220 ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
221 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
223 if (dfl_forkoff > ip->i_forkoff)
224 ip->i_forkoff = dfl_forkoff;
229 STATIC struct xfs_buf *
231 struct xfs_btree_cur *cur,
234 struct xfs_log_item *lip;
240 for (i = 0; i < cur->bc_maxlevels; i++) {
241 if (!cur->bc_levels[i].bp)
243 if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
244 return cur->bc_levels[i].bp;
247 /* Chase down all the log items to see if the bp is there */
248 list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
249 struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip;
251 if (bip->bli_item.li_type == XFS_LI_BUF &&
252 xfs_buf_daddr(bip->bli_buf) == bno)
261 struct xfs_btree_block *block,
267 __be64 *pp, *thispa; /* pointer to block address */
268 xfs_bmbt_key_t *prevp, *keyp;
270 ASSERT(be16_to_cpu(block->bb_level) > 0);
273 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
274 dmxr = mp->m_bmap_dmxr[0];
275 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
278 ASSERT(be64_to_cpu(prevp->br_startoff) <
279 be64_to_cpu(keyp->br_startoff));
284 * Compare the block numbers to see if there are dups.
287 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
289 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
291 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
293 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
295 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
296 if (*thispa == *pp) {
297 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
299 (unsigned long long)be64_to_cpu(*thispa));
300 xfs_err(mp, "%s: ptrs are equal in node\n",
302 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
309 * Check that the extents for the inode ip are in the right order in all
310 * btree leaves. THis becomes prohibitively expensive for large extent count
311 * files, so don't bother with inodes that have more than 10,000 extents in
312 * them. The btree record ordering checks will still be done, so for such large
313 * bmapbt constructs that is going to catch most corruptions.
316 xfs_bmap_check_leaf_extents(
317 struct xfs_btree_cur *cur, /* btree cursor or null */
318 xfs_inode_t *ip, /* incore inode pointer */
319 int whichfork) /* data or attr fork */
321 struct xfs_mount *mp = ip->i_mount;
322 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
323 struct xfs_btree_block *block; /* current btree block */
324 xfs_fsblock_t bno; /* block # of "block" */
325 struct xfs_buf *bp; /* buffer for "block" */
326 int error; /* error return value */
327 xfs_extnum_t i=0, j; /* index into the extents list */
328 int level; /* btree level, for checking */
329 __be64 *pp; /* pointer to block address */
330 xfs_bmbt_rec_t *ep; /* pointer to current extent */
331 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
332 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
335 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
338 /* skip large extent count inodes */
339 if (ip->i_df.if_nextents > 10000)
343 block = ifp->if_broot;
345 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
347 level = be16_to_cpu(block->bb_level);
349 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
350 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
351 bno = be64_to_cpu(*pp);
353 ASSERT(bno != NULLFSBLOCK);
354 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
355 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
358 * Go down the tree until leaf level is reached, following the first
359 * pointer (leftmost) at each level.
361 while (level-- > 0) {
362 /* See if buf is in cur first */
364 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
367 error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
373 block = XFS_BUF_TO_BLOCK(bp);
378 * Check this block for basic sanity (increasing keys and
379 * no duplicate blocks).
382 xfs_check_block(block, mp, 0, 0);
383 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
384 bno = be64_to_cpu(*pp);
385 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
386 error = -EFSCORRUPTED;
391 xfs_trans_brelse(NULL, bp);
396 * Here with bp and block set to the leftmost leaf node in the tree.
401 * Loop over all leaf nodes checking that all extents are in the right order.
404 xfs_fsblock_t nextbno;
405 xfs_extnum_t num_recs;
408 num_recs = xfs_btree_get_numrecs(block);
411 * Read-ahead the next leaf block, if any.
414 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
417 * Check all the extents to make sure they are OK.
418 * If we had a previous block, the last entry should
419 * conform with the first entry in this one.
422 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
424 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
425 xfs_bmbt_disk_get_blockcount(&last) <=
426 xfs_bmbt_disk_get_startoff(ep));
428 for (j = 1; j < num_recs; j++) {
429 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
430 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
431 xfs_bmbt_disk_get_blockcount(ep) <=
432 xfs_bmbt_disk_get_startoff(nextp));
440 xfs_trans_brelse(NULL, bp);
444 * If we've reached the end, stop.
446 if (bno == NULLFSBLOCK)
450 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
453 error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
459 block = XFS_BUF_TO_BLOCK(bp);
465 xfs_warn(mp, "%s: at error0", __func__);
467 xfs_trans_brelse(NULL, bp);
469 xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
471 xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
472 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
477 * Validate that the bmbt_irecs being returned from bmapi are valid
478 * given the caller's original parameters. Specifically check the
479 * ranges of the returned irecs to ensure that they only extend beyond
480 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
483 xfs_bmap_validate_ret(
487 xfs_bmbt_irec_t *mval,
491 int i; /* index to map values */
493 ASSERT(ret_nmap <= nmap);
495 for (i = 0; i < ret_nmap; i++) {
496 ASSERT(mval[i].br_blockcount > 0);
497 if (!(flags & XFS_BMAPI_ENTIRE)) {
498 ASSERT(mval[i].br_startoff >= bno);
499 ASSERT(mval[i].br_blockcount <= len);
500 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
503 ASSERT(mval[i].br_startoff < bno + len);
504 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
508 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
509 mval[i].br_startoff);
510 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
511 mval[i].br_startblock != HOLESTARTBLOCK);
512 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
513 mval[i].br_state == XFS_EXT_UNWRITTEN);
518 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
519 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
523 * Inode fork format manipulation functions
527 * Convert the inode format to extent format if it currently is in btree format,
528 * but the extent list is small enough that it fits into the extent format.
530 * Since the extents are already in-core, all we have to do is give up the space
531 * for the btree root and pitch the leaf block.
533 STATIC int /* error */
534 xfs_bmap_btree_to_extents(
535 struct xfs_trans *tp, /* transaction pointer */
536 struct xfs_inode *ip, /* incore inode pointer */
537 struct xfs_btree_cur *cur, /* btree cursor */
538 int *logflagsp, /* inode logging flags */
539 int whichfork) /* data or attr fork */
541 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
542 struct xfs_mount *mp = ip->i_mount;
543 struct xfs_btree_block *rblock = ifp->if_broot;
544 struct xfs_btree_block *cblock;/* child btree block */
545 xfs_fsblock_t cbno; /* child block number */
546 struct xfs_buf *cbp; /* child block's buffer */
547 int error; /* error return value */
548 __be64 *pp; /* ptr to block address */
549 struct xfs_owner_info oinfo;
551 /* check if we actually need the extent format first: */
552 if (!xfs_bmap_wants_extents(ip, whichfork))
556 ASSERT(whichfork != XFS_COW_FORK);
557 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
558 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
559 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
560 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
562 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
563 cbno = be64_to_cpu(*pp);
565 if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
566 return -EFSCORRUPTED;
568 error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
572 cblock = XFS_BUF_TO_BLOCK(cbp);
573 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
576 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
577 error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
578 XFS_AG_RESV_NONE, false);
583 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
584 xfs_trans_binval(tp, cbp);
585 if (cur->bc_levels[0].bp == cbp)
586 cur->bc_levels[0].bp = NULL;
587 xfs_iroot_realloc(ip, -1, whichfork);
588 ASSERT(ifp->if_broot == NULL);
589 ifp->if_format = XFS_DINODE_FMT_EXTENTS;
590 *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
595 * Convert an extents-format file into a btree-format file.
596 * The new file will have a root block (in the inode) and a single child block.
598 STATIC int /* error */
599 xfs_bmap_extents_to_btree(
600 struct xfs_trans *tp, /* transaction pointer */
601 struct xfs_inode *ip, /* incore inode pointer */
602 struct xfs_btree_cur **curp, /* cursor returned to caller */
603 int wasdel, /* converting a delayed alloc */
604 int *logflagsp, /* inode logging flags */
605 int whichfork) /* data or attr fork */
607 struct xfs_btree_block *ablock; /* allocated (child) bt block */
608 struct xfs_buf *abp; /* buffer for ablock */
609 struct xfs_alloc_arg args; /* allocation arguments */
610 struct xfs_bmbt_rec *arp; /* child record pointer */
611 struct xfs_btree_block *block; /* btree root block */
612 struct xfs_btree_cur *cur; /* bmap btree cursor */
613 int error; /* error return value */
614 struct xfs_ifork *ifp; /* inode fork pointer */
615 struct xfs_bmbt_key *kp; /* root block key pointer */
616 struct xfs_mount *mp; /* mount structure */
617 xfs_bmbt_ptr_t *pp; /* root block address pointer */
618 struct xfs_iext_cursor icur;
619 struct xfs_bmbt_irec rec;
620 xfs_extnum_t cnt = 0;
623 ASSERT(whichfork != XFS_COW_FORK);
624 ifp = xfs_ifork_ptr(ip, whichfork);
625 ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
628 * Make space in the inode incore. This needs to be undone if we fail
629 * to expand the root.
631 xfs_iroot_realloc(ip, 1, whichfork);
636 block = ifp->if_broot;
637 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
638 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
639 XFS_BTREE_LONG_PTRS);
641 * Need a cursor. Can't allocate until bb_level is filled in.
643 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
644 cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
646 * Convert to a btree with two levels, one record in root.
648 ifp->if_format = XFS_DINODE_FMT_BTREE;
649 memset(&args, 0, sizeof(args));
652 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
654 args.minlen = args.maxlen = args.prod = 1;
655 args.wasdel = wasdel;
657 error = xfs_alloc_vextent_start_ag(&args,
658 XFS_INO_TO_FSB(mp, ip->i_ino));
660 goto out_root_realloc;
663 * Allocation can't fail, the space was reserved.
665 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
667 goto out_root_realloc;
670 cur->bc_ino.allocated++;
672 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
673 error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
674 XFS_FSB_TO_DADDR(mp, args.fsbno),
675 mp->m_bsize, 0, &abp);
677 goto out_unreserve_dquot;
680 * Fill in the child block.
682 abp->b_ops = &xfs_bmbt_buf_ops;
683 ablock = XFS_BUF_TO_BLOCK(abp);
684 xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
685 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
686 XFS_BTREE_LONG_PTRS);
688 for_each_xfs_iext(ifp, &icur, &rec) {
689 if (isnullstartblock(rec.br_startblock))
691 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
692 xfs_bmbt_disk_set_all(arp, &rec);
695 ASSERT(cnt == ifp->if_nextents);
696 xfs_btree_set_numrecs(ablock, cnt);
699 * Fill in the root key and pointer.
701 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
702 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
703 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
704 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
705 be16_to_cpu(block->bb_level)));
706 *pp = cpu_to_be64(args.fsbno);
709 * Do all this logging at the end so that
710 * the root is at the right level.
712 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
713 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
714 ASSERT(*curp == NULL);
716 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
720 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
722 xfs_iroot_realloc(ip, -1, whichfork);
723 ifp->if_format = XFS_DINODE_FMT_EXTENTS;
724 ASSERT(ifp->if_broot == NULL);
725 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
731 * Convert a local file to an extents file.
732 * This code is out of bounds for data forks of regular files,
733 * since the file data needs to get logged so things will stay consistent.
734 * (The bmap-level manipulations are ok, though).
737 xfs_bmap_local_to_extents_empty(
738 struct xfs_trans *tp,
739 struct xfs_inode *ip,
742 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
744 ASSERT(whichfork != XFS_COW_FORK);
745 ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
746 ASSERT(ifp->if_bytes == 0);
747 ASSERT(ifp->if_nextents == 0);
749 xfs_bmap_forkoff_reset(ip, whichfork);
752 ifp->if_format = XFS_DINODE_FMT_EXTENTS;
753 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
757 STATIC int /* error */
758 xfs_bmap_local_to_extents(
759 xfs_trans_t *tp, /* transaction pointer */
760 xfs_inode_t *ip, /* incore inode pointer */
761 xfs_extlen_t total, /* total blocks needed by transaction */
762 int *logflagsp, /* inode logging flags */
764 void (*init_fn)(struct xfs_trans *tp,
766 struct xfs_inode *ip,
767 struct xfs_ifork *ifp))
770 int flags; /* logging flags returned */
771 struct xfs_ifork *ifp; /* inode fork pointer */
772 xfs_alloc_arg_t args; /* allocation arguments */
773 struct xfs_buf *bp; /* buffer for extent block */
774 struct xfs_bmbt_irec rec;
775 struct xfs_iext_cursor icur;
778 * We don't want to deal with the case of keeping inode data inline yet.
779 * So sending the data fork of a regular inode is invalid.
781 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
782 ifp = xfs_ifork_ptr(ip, whichfork);
783 ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
785 if (!ifp->if_bytes) {
786 xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
787 flags = XFS_ILOG_CORE;
793 memset(&args, 0, sizeof(args));
795 args.mp = ip->i_mount;
797 args.minlen = args.maxlen = args.prod = 1;
798 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
801 * Allocate a block. We know we need only one, since the
802 * file currently fits in an inode.
805 args.minlen = args.maxlen = args.prod = 1;
806 error = xfs_alloc_vextent_start_ag(&args,
807 XFS_INO_TO_FSB(args.mp, ip->i_ino));
811 /* Can't fail, the space was reserved. */
812 ASSERT(args.fsbno != NULLFSBLOCK);
813 ASSERT(args.len == 1);
814 error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
815 XFS_FSB_TO_DADDR(args.mp, args.fsbno),
816 args.mp->m_bsize, 0, &bp);
821 * Initialize the block, copy the data and log the remote buffer.
823 * The callout is responsible for logging because the remote format
824 * might differ from the local format and thus we don't know how much to
825 * log here. Note that init_fn must also set the buffer log item type
828 init_fn(tp, bp, ip, ifp);
830 /* account for the change in fork size */
831 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
832 xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
833 flags |= XFS_ILOG_CORE;
839 rec.br_startblock = args.fsbno;
840 rec.br_blockcount = 1;
841 rec.br_state = XFS_EXT_NORM;
842 xfs_iext_first(ifp, &icur);
843 xfs_iext_insert(ip, &icur, &rec, 0);
845 ifp->if_nextents = 1;
847 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
848 flags |= xfs_ilog_fext(whichfork);
856 * Called from xfs_bmap_add_attrfork to handle btree format files.
858 STATIC int /* error */
859 xfs_bmap_add_attrfork_btree(
860 xfs_trans_t *tp, /* transaction pointer */
861 xfs_inode_t *ip, /* incore inode pointer */
862 int *flags) /* inode logging flags */
864 struct xfs_btree_block *block = ip->i_df.if_broot;
865 struct xfs_btree_cur *cur; /* btree cursor */
866 int error; /* error return value */
867 xfs_mount_t *mp; /* file system mount struct */
868 int stat; /* newroot status */
872 if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
873 *flags |= XFS_ILOG_DBROOT;
875 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
876 error = xfs_bmbt_lookup_first(cur, &stat);
879 /* must be at least one entry */
880 if (XFS_IS_CORRUPT(mp, stat != 1)) {
881 error = -EFSCORRUPTED;
884 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
887 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
890 cur->bc_ino.allocated = 0;
891 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
895 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
900 * Called from xfs_bmap_add_attrfork to handle extents format files.
902 STATIC int /* error */
903 xfs_bmap_add_attrfork_extents(
904 struct xfs_trans *tp, /* transaction pointer */
905 struct xfs_inode *ip, /* incore inode pointer */
906 int *flags) /* inode logging flags */
908 struct xfs_btree_cur *cur; /* bmap btree cursor */
909 int error; /* error return value */
911 if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
912 xfs_inode_data_fork_size(ip))
915 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
918 cur->bc_ino.allocated = 0;
919 xfs_btree_del_cursor(cur, error);
925 * Called from xfs_bmap_add_attrfork to handle local format files. Each
926 * different data fork content type needs a different callout to do the
927 * conversion. Some are basic and only require special block initialisation
928 * callouts for the data formating, others (directories) are so specialised they
929 * handle everything themselves.
931 * XXX (dgc): investigate whether directory conversion can use the generic
932 * formatting callout. It should be possible - it's just a very complex
935 STATIC int /* error */
936 xfs_bmap_add_attrfork_local(
937 struct xfs_trans *tp, /* transaction pointer */
938 struct xfs_inode *ip, /* incore inode pointer */
939 int *flags) /* inode logging flags */
941 struct xfs_da_args dargs; /* args for dir/attr code */
943 if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
946 if (S_ISDIR(VFS_I(ip)->i_mode)) {
947 memset(&dargs, 0, sizeof(dargs));
948 dargs.geo = ip->i_mount->m_dir_geo;
950 dargs.total = dargs.geo->fsbcount;
951 dargs.whichfork = XFS_DATA_FORK;
953 return xfs_dir2_sf_to_block(&dargs);
956 if (S_ISLNK(VFS_I(ip)->i_mode))
957 return xfs_bmap_local_to_extents(tp, ip, 1, flags,
959 xfs_symlink_local_to_remote);
961 /* should only be called for types that support local format data */
963 return -EFSCORRUPTED;
967 * Set an inode attr fork offset based on the format of the data fork.
970 xfs_bmap_set_attrforkoff(
971 struct xfs_inode *ip,
975 int default_size = xfs_default_attroffset(ip) >> 3;
977 switch (ip->i_df.if_format) {
978 case XFS_DINODE_FMT_DEV:
979 ip->i_forkoff = default_size;
981 case XFS_DINODE_FMT_LOCAL:
982 case XFS_DINODE_FMT_EXTENTS:
983 case XFS_DINODE_FMT_BTREE:
984 ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
986 ip->i_forkoff = default_size;
987 else if (xfs_has_attr2(ip->i_mount) && version)
999 * Convert inode from non-attributed to attributed.
1000 * Must not be in a transaction, ip must not be locked.
1002 int /* error code */
1003 xfs_bmap_add_attrfork(
1004 xfs_inode_t *ip, /* incore inode pointer */
1005 int size, /* space new attribute needs */
1006 int rsvd) /* xact may use reserved blks */
1008 xfs_mount_t *mp; /* mount structure */
1009 xfs_trans_t *tp; /* transaction pointer */
1010 int blks; /* space reservation */
1011 int version = 1; /* superblock attr version */
1012 int logflags; /* logging flags */
1013 int error; /* error return value */
1015 ASSERT(xfs_inode_has_attr_fork(ip) == 0);
1018 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1020 blks = XFS_ADDAFORK_SPACE_RES(mp);
1022 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
1026 if (xfs_inode_has_attr_fork(ip))
1029 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1030 error = xfs_bmap_set_attrforkoff(ip, size, &version);
1034 xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1036 switch (ip->i_df.if_format) {
1037 case XFS_DINODE_FMT_LOCAL:
1038 error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1040 case XFS_DINODE_FMT_EXTENTS:
1041 error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1043 case XFS_DINODE_FMT_BTREE:
1044 error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1051 xfs_trans_log_inode(tp, ip, logflags);
1054 if (!xfs_has_attr(mp) ||
1055 (!xfs_has_attr2(mp) && version == 2)) {
1056 bool log_sb = false;
1058 spin_lock(&mp->m_sb_lock);
1059 if (!xfs_has_attr(mp)) {
1063 if (!xfs_has_attr2(mp) && version == 2) {
1067 spin_unlock(&mp->m_sb_lock);
1072 error = xfs_trans_commit(tp);
1073 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1077 xfs_trans_cancel(tp);
1078 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1083 * Internal and external extent tree search functions.
1086 struct xfs_iread_state {
1087 struct xfs_iext_cursor icur;
1088 xfs_extnum_t loaded;
1092 xfs_bmap_complain_bad_rec(
1093 struct xfs_inode *ip,
1096 const struct xfs_bmbt_irec *irec)
1098 struct xfs_mount *mp = ip->i_mount;
1099 const char *forkname;
1101 switch (whichfork) {
1102 case XFS_DATA_FORK: forkname = "data"; break;
1103 case XFS_ATTR_FORK: forkname = "attr"; break;
1104 case XFS_COW_FORK: forkname = "CoW"; break;
1105 default: forkname = "???"; break;
1109 "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1110 ip->i_ino, forkname, fa);
1112 "Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1113 irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1116 return -EFSCORRUPTED;
1119 /* Stuff every bmbt record from this block into the incore extent map. */
1121 xfs_iread_bmbt_block(
1122 struct xfs_btree_cur *cur,
1126 struct xfs_iread_state *ir = priv;
1127 struct xfs_mount *mp = cur->bc_mp;
1128 struct xfs_inode *ip = cur->bc_ino.ip;
1129 struct xfs_btree_block *block;
1131 struct xfs_bmbt_rec *frp;
1132 xfs_extnum_t num_recs;
1134 int whichfork = cur->bc_ino.whichfork;
1135 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1137 block = xfs_btree_get_block(cur, level, &bp);
1139 /* Abort if we find more records than nextents. */
1140 num_recs = xfs_btree_get_numrecs(block);
1141 if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1142 xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1143 (unsigned long long)ip->i_ino);
1144 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1145 sizeof(*block), __this_address);
1146 return -EFSCORRUPTED;
1149 /* Copy records into the incore cache. */
1150 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1151 for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1152 struct xfs_bmbt_irec new;
1155 xfs_bmbt_disk_get_all(frp, &new);
1156 fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1158 xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1159 "xfs_iread_extents(2)", frp,
1161 return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1164 xfs_iext_insert(ip, &ir->icur, &new,
1165 xfs_bmap_fork_to_state(whichfork));
1166 trace_xfs_read_extent(ip, &ir->icur,
1167 xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1168 xfs_iext_next(ifp, &ir->icur);
1175 * Read in extents from a btree-format inode.
1179 struct xfs_trans *tp,
1180 struct xfs_inode *ip,
1183 struct xfs_iread_state ir;
1184 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1185 struct xfs_mount *mp = ip->i_mount;
1186 struct xfs_btree_cur *cur;
1189 if (!xfs_need_iread_extents(ifp))
1192 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1195 xfs_iext_first(ifp, &ir.icur);
1196 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1197 error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1198 XFS_BTREE_VISIT_RECORDS, &ir);
1199 xfs_btree_del_cursor(cur, error);
1203 if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1204 error = -EFSCORRUPTED;
1207 ASSERT(ir.loaded == xfs_iext_count(ifp));
1209 * Use release semantics so that we can use acquire semantics in
1210 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1213 smp_store_release(&ifp->if_needextents, 0);
1216 xfs_iext_destroy(ifp);
1221 * Returns the relative block number of the first unused block(s) in the given
1222 * fork with at least "len" logically contiguous blocks free. This is the
1223 * lowest-address hole if the fork has holes, else the first block past the end
1224 * of fork. Return 0 if the fork is currently local (in-inode).
1227 xfs_bmap_first_unused(
1228 struct xfs_trans *tp, /* transaction pointer */
1229 struct xfs_inode *ip, /* incore inode */
1230 xfs_extlen_t len, /* size of hole to find */
1231 xfs_fileoff_t *first_unused, /* unused block */
1232 int whichfork) /* data or attr fork */
1234 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1235 struct xfs_bmbt_irec got;
1236 struct xfs_iext_cursor icur;
1237 xfs_fileoff_t lastaddr = 0;
1238 xfs_fileoff_t lowest, max;
1241 if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1246 ASSERT(xfs_ifork_has_extents(ifp));
1248 error = xfs_iread_extents(tp, ip, whichfork);
1252 lowest = max = *first_unused;
1253 for_each_xfs_iext(ifp, &icur, &got) {
1255 * See if the hole before this extent will work.
1257 if (got.br_startoff >= lowest + len &&
1258 got.br_startoff - max >= len)
1260 lastaddr = got.br_startoff + got.br_blockcount;
1261 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1264 *first_unused = max;
1269 * Returns the file-relative block number of the last block - 1 before
1270 * last_block (input value) in the file.
1271 * This is not based on i_size, it is based on the extent records.
1272 * Returns 0 for local files, as they do not have extent records.
1275 xfs_bmap_last_before(
1276 struct xfs_trans *tp, /* transaction pointer */
1277 struct xfs_inode *ip, /* incore inode */
1278 xfs_fileoff_t *last_block, /* last block */
1279 int whichfork) /* data or attr fork */
1281 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1282 struct xfs_bmbt_irec got;
1283 struct xfs_iext_cursor icur;
1286 switch (ifp->if_format) {
1287 case XFS_DINODE_FMT_LOCAL:
1290 case XFS_DINODE_FMT_BTREE:
1291 case XFS_DINODE_FMT_EXTENTS:
1295 return -EFSCORRUPTED;
1298 error = xfs_iread_extents(tp, ip, whichfork);
1302 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1308 xfs_bmap_last_extent(
1309 struct xfs_trans *tp,
1310 struct xfs_inode *ip,
1312 struct xfs_bmbt_irec *rec,
1315 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1316 struct xfs_iext_cursor icur;
1319 error = xfs_iread_extents(tp, ip, whichfork);
1323 xfs_iext_last(ifp, &icur);
1324 if (!xfs_iext_get_extent(ifp, &icur, rec))
1332 * Check the last inode extent to determine whether this allocation will result
1333 * in blocks being allocated at the end of the file. When we allocate new data
1334 * blocks at the end of the file which do not start at the previous data block,
1335 * we will try to align the new blocks at stripe unit boundaries.
1337 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1338 * at, or past the EOF.
1342 struct xfs_bmalloca *bma,
1345 struct xfs_bmbt_irec rec;
1350 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1361 * Check if we are allocation or past the last extent, or at least into
1362 * the last delayed allocated extent.
1364 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1365 (bma->offset >= rec.br_startoff &&
1366 isnullstartblock(rec.br_startblock));
1371 * Returns the file-relative block number of the first block past eof in
1372 * the file. This is not based on i_size, it is based on the extent records.
1373 * Returns 0 for local files, as they do not have extent records.
1376 xfs_bmap_last_offset(
1377 struct xfs_inode *ip,
1378 xfs_fileoff_t *last_block,
1381 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1382 struct xfs_bmbt_irec rec;
1388 if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1391 if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
1392 return -EFSCORRUPTED;
1394 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1395 if (error || is_empty)
1398 *last_block = rec.br_startoff + rec.br_blockcount;
1403 * Extent tree manipulation functions used during allocation.
1407 * Convert a delayed allocation to a real allocation.
1409 STATIC int /* error */
1410 xfs_bmap_add_extent_delay_real(
1411 struct xfs_bmalloca *bma,
1414 struct xfs_mount *mp = bma->ip->i_mount;
1415 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
1416 struct xfs_bmbt_irec *new = &bma->got;
1417 int error; /* error return value */
1418 int i; /* temp state */
1419 xfs_fileoff_t new_endoff; /* end offset of new entry */
1420 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1421 /* left is 0, right is 1, prev is 2 */
1422 int rval=0; /* return value (logging flags) */
1423 uint32_t state = xfs_bmap_fork_to_state(whichfork);
1424 xfs_filblks_t da_new; /* new count del alloc blocks used */
1425 xfs_filblks_t da_old; /* old count del alloc blocks used */
1426 xfs_filblks_t temp=0; /* value for da_new calculations */
1427 int tmp_rval; /* partial logging flags */
1428 struct xfs_bmbt_irec old;
1430 ASSERT(whichfork != XFS_ATTR_FORK);
1431 ASSERT(!isnullstartblock(new->br_startblock));
1433 (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
1435 XFS_STATS_INC(mp, xs_add_exlist);
1442 * Set up a bunch of variables to make the tests simpler.
1444 xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1445 new_endoff = new->br_startoff + new->br_blockcount;
1446 ASSERT(isnullstartblock(PREV.br_startblock));
1447 ASSERT(PREV.br_startoff <= new->br_startoff);
1448 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1450 da_old = startblockval(PREV.br_startblock);
1454 * Set flags determining what part of the previous delayed allocation
1455 * extent is being replaced by a real allocation.
1457 if (PREV.br_startoff == new->br_startoff)
1458 state |= BMAP_LEFT_FILLING;
1459 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1460 state |= BMAP_RIGHT_FILLING;
1463 * Check and set flags if this segment has a left neighbor.
1464 * Don't set contiguous if the combined extent would be too large.
1466 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1467 state |= BMAP_LEFT_VALID;
1468 if (isnullstartblock(LEFT.br_startblock))
1469 state |= BMAP_LEFT_DELAY;
1472 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1473 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1474 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1475 LEFT.br_state == new->br_state &&
1476 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
1477 state |= BMAP_LEFT_CONTIG;
1480 * Check and set flags if this segment has a right neighbor.
1481 * Don't set contiguous if the combined extent would be too large.
1482 * Also check for all-three-contiguous being too large.
1484 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1485 state |= BMAP_RIGHT_VALID;
1486 if (isnullstartblock(RIGHT.br_startblock))
1487 state |= BMAP_RIGHT_DELAY;
1490 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1491 new_endoff == RIGHT.br_startoff &&
1492 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1493 new->br_state == RIGHT.br_state &&
1494 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1495 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1496 BMAP_RIGHT_FILLING)) !=
1497 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1498 BMAP_RIGHT_FILLING) ||
1499 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1500 <= XFS_MAX_BMBT_EXTLEN))
1501 state |= BMAP_RIGHT_CONTIG;
1505 * Switch out based on the FILLING and CONTIG state bits.
1507 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1508 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1509 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1510 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1512 * Filling in all of a previously delayed allocation extent.
1513 * The left and right neighbors are both contiguous with new.
1515 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1517 xfs_iext_remove(bma->ip, &bma->icur, state);
1518 xfs_iext_remove(bma->ip, &bma->icur, state);
1519 xfs_iext_prev(ifp, &bma->icur);
1520 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1523 if (bma->cur == NULL)
1524 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1526 rval = XFS_ILOG_CORE;
1527 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1530 if (XFS_IS_CORRUPT(mp, i != 1)) {
1531 error = -EFSCORRUPTED;
1534 error = xfs_btree_delete(bma->cur, &i);
1537 if (XFS_IS_CORRUPT(mp, i != 1)) {
1538 error = -EFSCORRUPTED;
1541 error = xfs_btree_decrement(bma->cur, 0, &i);
1544 if (XFS_IS_CORRUPT(mp, i != 1)) {
1545 error = -EFSCORRUPTED;
1548 error = xfs_bmbt_update(bma->cur, &LEFT);
1554 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1556 * Filling in all of a previously delayed allocation extent.
1557 * The left neighbor is contiguous, the right is not.
1560 LEFT.br_blockcount += PREV.br_blockcount;
1562 xfs_iext_remove(bma->ip, &bma->icur, state);
1563 xfs_iext_prev(ifp, &bma->icur);
1564 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1566 if (bma->cur == NULL)
1567 rval = XFS_ILOG_DEXT;
1570 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1573 if (XFS_IS_CORRUPT(mp, i != 1)) {
1574 error = -EFSCORRUPTED;
1577 error = xfs_bmbt_update(bma->cur, &LEFT);
1583 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1585 * Filling in all of a previously delayed allocation extent.
1586 * The right neighbor is contiguous, the left is not. Take care
1587 * with delay -> unwritten extent allocation here because the
1588 * delalloc record we are overwriting is always written.
1590 PREV.br_startblock = new->br_startblock;
1591 PREV.br_blockcount += RIGHT.br_blockcount;
1592 PREV.br_state = new->br_state;
1594 xfs_iext_next(ifp, &bma->icur);
1595 xfs_iext_remove(bma->ip, &bma->icur, state);
1596 xfs_iext_prev(ifp, &bma->icur);
1597 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1599 if (bma->cur == NULL)
1600 rval = XFS_ILOG_DEXT;
1603 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1606 if (XFS_IS_CORRUPT(mp, i != 1)) {
1607 error = -EFSCORRUPTED;
1610 error = xfs_bmbt_update(bma->cur, &PREV);
1616 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1618 * Filling in all of a previously delayed allocation extent.
1619 * Neither the left nor right neighbors are contiguous with
1622 PREV.br_startblock = new->br_startblock;
1623 PREV.br_state = new->br_state;
1624 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1627 if (bma->cur == NULL)
1628 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1630 rval = XFS_ILOG_CORE;
1631 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1634 if (XFS_IS_CORRUPT(mp, i != 0)) {
1635 error = -EFSCORRUPTED;
1638 error = xfs_btree_insert(bma->cur, &i);
1641 if (XFS_IS_CORRUPT(mp, i != 1)) {
1642 error = -EFSCORRUPTED;
1648 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1650 * Filling in the first part of a previous delayed allocation.
1651 * The left neighbor is contiguous.
1654 temp = PREV.br_blockcount - new->br_blockcount;
1655 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1656 startblockval(PREV.br_startblock));
1658 LEFT.br_blockcount += new->br_blockcount;
1660 PREV.br_blockcount = temp;
1661 PREV.br_startoff += new->br_blockcount;
1662 PREV.br_startblock = nullstartblock(da_new);
1664 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1665 xfs_iext_prev(ifp, &bma->icur);
1666 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1668 if (bma->cur == NULL)
1669 rval = XFS_ILOG_DEXT;
1672 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1675 if (XFS_IS_CORRUPT(mp, i != 1)) {
1676 error = -EFSCORRUPTED;
1679 error = xfs_bmbt_update(bma->cur, &LEFT);
1685 case BMAP_LEFT_FILLING:
1687 * Filling in the first part of a previous delayed allocation.
1688 * The left neighbor is not contiguous.
1690 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1693 if (bma->cur == NULL)
1694 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1696 rval = XFS_ILOG_CORE;
1697 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1700 if (XFS_IS_CORRUPT(mp, i != 0)) {
1701 error = -EFSCORRUPTED;
1704 error = xfs_btree_insert(bma->cur, &i);
1707 if (XFS_IS_CORRUPT(mp, i != 1)) {
1708 error = -EFSCORRUPTED;
1713 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1714 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1715 &bma->cur, 1, &tmp_rval, whichfork);
1721 temp = PREV.br_blockcount - new->br_blockcount;
1722 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1723 startblockval(PREV.br_startblock) -
1724 (bma->cur ? bma->cur->bc_ino.allocated : 0));
1726 PREV.br_startoff = new_endoff;
1727 PREV.br_blockcount = temp;
1728 PREV.br_startblock = nullstartblock(da_new);
1729 xfs_iext_next(ifp, &bma->icur);
1730 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1731 xfs_iext_prev(ifp, &bma->icur);
1734 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1736 * Filling in the last part of a previous delayed allocation.
1737 * The right neighbor is contiguous with the new allocation.
1740 RIGHT.br_startoff = new->br_startoff;
1741 RIGHT.br_startblock = new->br_startblock;
1742 RIGHT.br_blockcount += new->br_blockcount;
1744 if (bma->cur == NULL)
1745 rval = XFS_ILOG_DEXT;
1748 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1751 if (XFS_IS_CORRUPT(mp, i != 1)) {
1752 error = -EFSCORRUPTED;
1755 error = xfs_bmbt_update(bma->cur, &RIGHT);
1760 temp = PREV.br_blockcount - new->br_blockcount;
1761 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1762 startblockval(PREV.br_startblock));
1764 PREV.br_blockcount = temp;
1765 PREV.br_startblock = nullstartblock(da_new);
1767 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1768 xfs_iext_next(ifp, &bma->icur);
1769 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1772 case BMAP_RIGHT_FILLING:
1774 * Filling in the last part of a previous delayed allocation.
1775 * The right neighbor is not contiguous.
1777 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1780 if (bma->cur == NULL)
1781 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1783 rval = XFS_ILOG_CORE;
1784 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1787 if (XFS_IS_CORRUPT(mp, i != 0)) {
1788 error = -EFSCORRUPTED;
1791 error = xfs_btree_insert(bma->cur, &i);
1794 if (XFS_IS_CORRUPT(mp, i != 1)) {
1795 error = -EFSCORRUPTED;
1800 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1801 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1802 &bma->cur, 1, &tmp_rval, whichfork);
1808 temp = PREV.br_blockcount - new->br_blockcount;
1809 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1810 startblockval(PREV.br_startblock) -
1811 (bma->cur ? bma->cur->bc_ino.allocated : 0));
1813 PREV.br_startblock = nullstartblock(da_new);
1814 PREV.br_blockcount = temp;
1815 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1816 xfs_iext_next(ifp, &bma->icur);
1821 * Filling in the middle part of a previous delayed allocation.
1822 * Contiguity is impossible here.
1823 * This case is avoided almost all the time.
1825 * We start with a delayed allocation:
1827 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1830 * and we are allocating:
1831 * +rrrrrrrrrrrrrrrrr+
1834 * and we set it up for insertion as:
1835 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1837 * PREV @ idx LEFT RIGHT
1838 * inserted at idx + 1
1842 /* LEFT is the new middle */
1845 /* RIGHT is the new right */
1846 RIGHT.br_state = PREV.br_state;
1847 RIGHT.br_startoff = new_endoff;
1848 RIGHT.br_blockcount =
1849 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1850 RIGHT.br_startblock =
1851 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1852 RIGHT.br_blockcount));
1855 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1856 PREV.br_startblock =
1857 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1858 PREV.br_blockcount));
1859 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1861 xfs_iext_next(ifp, &bma->icur);
1862 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1863 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1866 if (bma->cur == NULL)
1867 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1869 rval = XFS_ILOG_CORE;
1870 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1873 if (XFS_IS_CORRUPT(mp, i != 0)) {
1874 error = -EFSCORRUPTED;
1877 error = xfs_btree_insert(bma->cur, &i);
1880 if (XFS_IS_CORRUPT(mp, i != 1)) {
1881 error = -EFSCORRUPTED;
1886 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1887 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1888 &bma->cur, 1, &tmp_rval, whichfork);
1894 da_new = startblockval(PREV.br_startblock) +
1895 startblockval(RIGHT.br_startblock);
1898 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1899 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1900 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1901 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1902 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1903 case BMAP_LEFT_CONTIG:
1904 case BMAP_RIGHT_CONTIG:
1906 * These cases are all impossible.
1911 /* add reverse mapping unless caller opted out */
1912 if (!(bma->flags & XFS_BMAPI_NORMAP))
1913 xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1915 /* convert to a btree if necessary */
1916 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1917 int tmp_logflags; /* partial log flag return val */
1919 ASSERT(bma->cur == NULL);
1920 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1921 &bma->cur, da_old > 0, &tmp_logflags,
1923 bma->logflags |= tmp_logflags;
1928 if (da_new != da_old)
1929 xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
1932 da_new += bma->cur->bc_ino.allocated;
1933 bma->cur->bc_ino.allocated = 0;
1936 /* adjust for changes in reserved delayed indirect blocks */
1937 if (da_new != da_old) {
1938 ASSERT(state == 0 || da_new < da_old);
1939 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
1943 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1945 if (whichfork != XFS_COW_FORK)
1946 bma->logflags |= rval;
1954 * Convert an unwritten allocation to a real allocation or vice versa.
1957 xfs_bmap_add_extent_unwritten_real(
1958 struct xfs_trans *tp,
1959 xfs_inode_t *ip, /* incore inode pointer */
1961 struct xfs_iext_cursor *icur,
1962 struct xfs_btree_cur **curp, /* if *curp is null, not a btree */
1963 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1964 int *logflagsp) /* inode logging flags */
1966 struct xfs_btree_cur *cur; /* btree cursor */
1967 int error; /* error return value */
1968 int i; /* temp state */
1969 struct xfs_ifork *ifp; /* inode fork pointer */
1970 xfs_fileoff_t new_endoff; /* end offset of new entry */
1971 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1972 /* left is 0, right is 1, prev is 2 */
1973 int rval=0; /* return value (logging flags) */
1974 uint32_t state = xfs_bmap_fork_to_state(whichfork);
1975 struct xfs_mount *mp = ip->i_mount;
1976 struct xfs_bmbt_irec old;
1981 ifp = xfs_ifork_ptr(ip, whichfork);
1983 ASSERT(!isnullstartblock(new->br_startblock));
1985 XFS_STATS_INC(mp, xs_add_exlist);
1992 * Set up a bunch of variables to make the tests simpler.
1995 xfs_iext_get_extent(ifp, icur, &PREV);
1996 ASSERT(new->br_state != PREV.br_state);
1997 new_endoff = new->br_startoff + new->br_blockcount;
1998 ASSERT(PREV.br_startoff <= new->br_startoff);
1999 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2002 * Set flags determining what part of the previous oldext allocation
2003 * extent is being replaced by a newext allocation.
2005 if (PREV.br_startoff == new->br_startoff)
2006 state |= BMAP_LEFT_FILLING;
2007 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2008 state |= BMAP_RIGHT_FILLING;
2011 * Check and set flags if this segment has a left neighbor.
2012 * Don't set contiguous if the combined extent would be too large.
2014 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2015 state |= BMAP_LEFT_VALID;
2016 if (isnullstartblock(LEFT.br_startblock))
2017 state |= BMAP_LEFT_DELAY;
2020 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2021 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2022 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2023 LEFT.br_state == new->br_state &&
2024 LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2025 state |= BMAP_LEFT_CONTIG;
2028 * Check and set flags if this segment has a right neighbor.
2029 * Don't set contiguous if the combined extent would be too large.
2030 * Also check for all-three-contiguous being too large.
2032 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2033 state |= BMAP_RIGHT_VALID;
2034 if (isnullstartblock(RIGHT.br_startblock))
2035 state |= BMAP_RIGHT_DELAY;
2038 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2039 new_endoff == RIGHT.br_startoff &&
2040 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2041 new->br_state == RIGHT.br_state &&
2042 new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2043 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2044 BMAP_RIGHT_FILLING)) !=
2045 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2046 BMAP_RIGHT_FILLING) ||
2047 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2048 <= XFS_MAX_BMBT_EXTLEN))
2049 state |= BMAP_RIGHT_CONTIG;
2052 * Switch out based on the FILLING and CONTIG state bits.
2054 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2055 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2056 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2057 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2059 * Setting all of a previous oldext extent to newext.
2060 * The left and right neighbors are both contiguous with new.
2062 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2064 xfs_iext_remove(ip, icur, state);
2065 xfs_iext_remove(ip, icur, state);
2066 xfs_iext_prev(ifp, icur);
2067 xfs_iext_update_extent(ip, state, icur, &LEFT);
2068 ifp->if_nextents -= 2;
2070 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2072 rval = XFS_ILOG_CORE;
2073 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2076 if (XFS_IS_CORRUPT(mp, i != 1)) {
2077 error = -EFSCORRUPTED;
2080 if ((error = xfs_btree_delete(cur, &i)))
2082 if (XFS_IS_CORRUPT(mp, i != 1)) {
2083 error = -EFSCORRUPTED;
2086 if ((error = xfs_btree_decrement(cur, 0, &i)))
2088 if (XFS_IS_CORRUPT(mp, i != 1)) {
2089 error = -EFSCORRUPTED;
2092 if ((error = xfs_btree_delete(cur, &i)))
2094 if (XFS_IS_CORRUPT(mp, i != 1)) {
2095 error = -EFSCORRUPTED;
2098 if ((error = xfs_btree_decrement(cur, 0, &i)))
2100 if (XFS_IS_CORRUPT(mp, i != 1)) {
2101 error = -EFSCORRUPTED;
2104 error = xfs_bmbt_update(cur, &LEFT);
2110 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2112 * Setting all of a previous oldext extent to newext.
2113 * The left neighbor is contiguous, the right is not.
2115 LEFT.br_blockcount += PREV.br_blockcount;
2117 xfs_iext_remove(ip, icur, state);
2118 xfs_iext_prev(ifp, icur);
2119 xfs_iext_update_extent(ip, state, icur, &LEFT);
2122 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2124 rval = XFS_ILOG_CORE;
2125 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2128 if (XFS_IS_CORRUPT(mp, i != 1)) {
2129 error = -EFSCORRUPTED;
2132 if ((error = xfs_btree_delete(cur, &i)))
2134 if (XFS_IS_CORRUPT(mp, i != 1)) {
2135 error = -EFSCORRUPTED;
2138 if ((error = xfs_btree_decrement(cur, 0, &i)))
2140 if (XFS_IS_CORRUPT(mp, i != 1)) {
2141 error = -EFSCORRUPTED;
2144 error = xfs_bmbt_update(cur, &LEFT);
2150 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2152 * Setting all of a previous oldext extent to newext.
2153 * The right neighbor is contiguous, the left is not.
2155 PREV.br_blockcount += RIGHT.br_blockcount;
2156 PREV.br_state = new->br_state;
2158 xfs_iext_next(ifp, icur);
2159 xfs_iext_remove(ip, icur, state);
2160 xfs_iext_prev(ifp, icur);
2161 xfs_iext_update_extent(ip, state, icur, &PREV);
2165 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2167 rval = XFS_ILOG_CORE;
2168 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2171 if (XFS_IS_CORRUPT(mp, i != 1)) {
2172 error = -EFSCORRUPTED;
2175 if ((error = xfs_btree_delete(cur, &i)))
2177 if (XFS_IS_CORRUPT(mp, i != 1)) {
2178 error = -EFSCORRUPTED;
2181 if ((error = xfs_btree_decrement(cur, 0, &i)))
2183 if (XFS_IS_CORRUPT(mp, i != 1)) {
2184 error = -EFSCORRUPTED;
2187 error = xfs_bmbt_update(cur, &PREV);
2193 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2195 * Setting all of a previous oldext extent to newext.
2196 * Neither the left nor right neighbors are contiguous with
2199 PREV.br_state = new->br_state;
2200 xfs_iext_update_extent(ip, state, icur, &PREV);
2203 rval = XFS_ILOG_DEXT;
2206 error = xfs_bmbt_lookup_eq(cur, new, &i);
2209 if (XFS_IS_CORRUPT(mp, i != 1)) {
2210 error = -EFSCORRUPTED;
2213 error = xfs_bmbt_update(cur, &PREV);
2219 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2221 * Setting the first part of a previous oldext extent to newext.
2222 * The left neighbor is contiguous.
2224 LEFT.br_blockcount += new->br_blockcount;
2227 PREV.br_startoff += new->br_blockcount;
2228 PREV.br_startblock += new->br_blockcount;
2229 PREV.br_blockcount -= new->br_blockcount;
2231 xfs_iext_update_extent(ip, state, icur, &PREV);
2232 xfs_iext_prev(ifp, icur);
2233 xfs_iext_update_extent(ip, state, icur, &LEFT);
2236 rval = XFS_ILOG_DEXT;
2239 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2242 if (XFS_IS_CORRUPT(mp, i != 1)) {
2243 error = -EFSCORRUPTED;
2246 error = xfs_bmbt_update(cur, &PREV);
2249 error = xfs_btree_decrement(cur, 0, &i);
2252 error = xfs_bmbt_update(cur, &LEFT);
2258 case BMAP_LEFT_FILLING:
2260 * Setting the first part of a previous oldext extent to newext.
2261 * The left neighbor is not contiguous.
2264 PREV.br_startoff += new->br_blockcount;
2265 PREV.br_startblock += new->br_blockcount;
2266 PREV.br_blockcount -= new->br_blockcount;
2268 xfs_iext_update_extent(ip, state, icur, &PREV);
2269 xfs_iext_insert(ip, icur, new, state);
2273 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2275 rval = XFS_ILOG_CORE;
2276 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2279 if (XFS_IS_CORRUPT(mp, i != 1)) {
2280 error = -EFSCORRUPTED;
2283 error = xfs_bmbt_update(cur, &PREV);
2286 cur->bc_rec.b = *new;
2287 if ((error = xfs_btree_insert(cur, &i)))
2289 if (XFS_IS_CORRUPT(mp, i != 1)) {
2290 error = -EFSCORRUPTED;
2296 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2298 * Setting the last part of a previous oldext extent to newext.
2299 * The right neighbor is contiguous with the new allocation.
2302 PREV.br_blockcount -= new->br_blockcount;
2304 RIGHT.br_startoff = new->br_startoff;
2305 RIGHT.br_startblock = new->br_startblock;
2306 RIGHT.br_blockcount += new->br_blockcount;
2308 xfs_iext_update_extent(ip, state, icur, &PREV);
2309 xfs_iext_next(ifp, icur);
2310 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2313 rval = XFS_ILOG_DEXT;
2316 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2319 if (XFS_IS_CORRUPT(mp, i != 1)) {
2320 error = -EFSCORRUPTED;
2323 error = xfs_bmbt_update(cur, &PREV);
2326 error = xfs_btree_increment(cur, 0, &i);
2329 error = xfs_bmbt_update(cur, &RIGHT);
2335 case BMAP_RIGHT_FILLING:
2337 * Setting the last part of a previous oldext extent to newext.
2338 * The right neighbor is not contiguous.
2341 PREV.br_blockcount -= new->br_blockcount;
2343 xfs_iext_update_extent(ip, state, icur, &PREV);
2344 xfs_iext_next(ifp, icur);
2345 xfs_iext_insert(ip, icur, new, state);
2349 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2351 rval = XFS_ILOG_CORE;
2352 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2355 if (XFS_IS_CORRUPT(mp, i != 1)) {
2356 error = -EFSCORRUPTED;
2359 error = xfs_bmbt_update(cur, &PREV);
2362 error = xfs_bmbt_lookup_eq(cur, new, &i);
2365 if (XFS_IS_CORRUPT(mp, i != 0)) {
2366 error = -EFSCORRUPTED;
2369 if ((error = xfs_btree_insert(cur, &i)))
2371 if (XFS_IS_CORRUPT(mp, i != 1)) {
2372 error = -EFSCORRUPTED;
2380 * Setting the middle part of a previous oldext extent to
2381 * newext. Contiguity is impossible here.
2382 * One extent becomes three extents.
2385 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2388 r[1].br_startoff = new_endoff;
2389 r[1].br_blockcount =
2390 old.br_startoff + old.br_blockcount - new_endoff;
2391 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2392 r[1].br_state = PREV.br_state;
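/*
 * Worked example of the split above (values invented for illustration):
 * if PREV covered file offsets [0, 100) in the old state and the new
 * extent covers [40, 60), PREV has just been trimmed to [0, 40), the
 * caller's extent becomes the middle piece [40, 60), and r[1] describes
 * the tail [60, 100) carrying PREV's original state, so one record
 * becomes three.
 */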
2394 xfs_iext_update_extent(ip, state, icur, &PREV);
2395 xfs_iext_next(ifp, icur);
2396 xfs_iext_insert(ip, icur, &r[1], state);
2397 xfs_iext_insert(ip, icur, &r[0], state);
2398 ifp->if_nextents += 2;
2401 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2403 rval = XFS_ILOG_CORE;
2404 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2407 if (XFS_IS_CORRUPT(mp, i != 1)) {
2408 error = -EFSCORRUPTED;
2411 /* new right extent - oldext */
2412 error = xfs_bmbt_update(cur, &r[1]);
2415 /* new left extent - oldext */
2416 cur->bc_rec.b = PREV;
2417 if ((error = xfs_btree_insert(cur, &i)))
2419 if (XFS_IS_CORRUPT(mp, i != 1)) {
2420 error = -EFSCORRUPTED;
2424 * Reset the cursor to the position of the new extent
2425 * we are about to insert as we can't trust it after
2426 * the previous insert.
2428 error = xfs_bmbt_lookup_eq(cur, new, &i);
2431 if (XFS_IS_CORRUPT(mp, i != 0)) {
2432 error = -EFSCORRUPTED;
2435 /* new middle extent - newext */
2436 if ((error = xfs_btree_insert(cur, &i)))
2438 if (XFS_IS_CORRUPT(mp, i != 1)) {
2439 error = -EFSCORRUPTED;
2445 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2446 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2447 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2448 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2449 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2450 case BMAP_LEFT_CONTIG:
2451 case BMAP_RIGHT_CONTIG:
2453 * These cases are all impossible.
2458 /* update reverse mappings */
2459 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2461 /* convert to a btree if necessary */
2462 if (xfs_bmap_needs_btree(ip, whichfork)) {
2463 int tmp_logflags; /* partial log flag return val */
2465 ASSERT(cur == NULL);
2466 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2467 &tmp_logflags, whichfork);
2468 *logflagsp |= tmp_logflags;
2473 /* clear out the allocated field, done with it now in any case. */
2475 cur->bc_ino.allocated = 0;
2479 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2489 * Convert a hole to a delayed allocation.
2492 xfs_bmap_add_extent_hole_delay(
2493 xfs_inode_t *ip, /* incore inode pointer */
2495 struct xfs_iext_cursor *icur,
2496 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2498 struct xfs_ifork *ifp; /* inode fork pointer */
2499 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2500 xfs_filblks_t newlen=0; /* new indirect size */
2501 xfs_filblks_t oldlen=0; /* old indirect size */
2502 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2503 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2504 xfs_filblks_t temp; /* temp for indirect calculations */
2506 ifp = xfs_ifork_ptr(ip, whichfork);
2507 ASSERT(isnullstartblock(new->br_startblock));
2510 * Check and set flags if this segment has a left neighbor
2512 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2513 state |= BMAP_LEFT_VALID;
2514 if (isnullstartblock(left.br_startblock))
2515 state |= BMAP_LEFT_DELAY;
2519 * Check and set flags if the current (right) segment exists.
2520 * If it doesn't exist, we're converting the hole at end-of-file.
2522 if (xfs_iext_get_extent(ifp, icur, &right)) {
2523 state |= BMAP_RIGHT_VALID;
2524 if (isnullstartblock(right.br_startblock))
2525 state |= BMAP_RIGHT_DELAY;
2529 * Set contiguity flags on the left and right neighbors.
2530 * Don't let extents get too large, even if the pieces are contiguous.
2532 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2533 left.br_startoff + left.br_blockcount == new->br_startoff &&
2534 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2535 state |= BMAP_LEFT_CONTIG;
2537 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2538 new->br_startoff + new->br_blockcount == right.br_startoff &&
2539 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2540 (!(state & BMAP_LEFT_CONTIG) ||
2541 (left.br_blockcount + new->br_blockcount +
2542 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
2543 state |= BMAP_RIGHT_CONTIG;
2546 * Switch out based on the contiguity flags.
2548 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2549 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2551 * New allocation is contiguous with delayed allocations
2552 * on the left and on the right.
2553 * Merge all three into a single extent record.
2555 temp = left.br_blockcount + new->br_blockcount +
2556 right.br_blockcount;
2558 oldlen = startblockval(left.br_startblock) +
2559 startblockval(new->br_startblock) +
2560 startblockval(right.br_startblock);
2561 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2563 left.br_startblock = nullstartblock(newlen);
2564 left.br_blockcount = temp;
2566 xfs_iext_remove(ip, icur, state);
2567 xfs_iext_prev(ifp, icur);
2568 xfs_iext_update_extent(ip, state, icur, &left);
2571 case BMAP_LEFT_CONTIG:
2573 * New allocation is contiguous with a delayed allocation
2574 * on the left.
2575 * Merge the new allocation with the left neighbor.
2577 temp = left.br_blockcount + new->br_blockcount;
2579 oldlen = startblockval(left.br_startblock) +
2580 startblockval(new->br_startblock);
2581 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2583 left.br_blockcount = temp;
2584 left.br_startblock = nullstartblock(newlen);
2586 xfs_iext_prev(ifp, icur);
2587 xfs_iext_update_extent(ip, state, icur, &left);
2590 case BMAP_RIGHT_CONTIG:
2592 * New allocation is contiguous with a delayed allocation
2593 * on the right.
2594 * Merge the new allocation with the right neighbor.
2596 temp = new->br_blockcount + right.br_blockcount;
2597 oldlen = startblockval(new->br_startblock) +
2598 startblockval(right.br_startblock);
2599 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2601 right.br_startoff = new->br_startoff;
2602 right.br_startblock = nullstartblock(newlen);
2603 right.br_blockcount = temp;
2604 xfs_iext_update_extent(ip, state, icur, &right);
2609 * New allocation is not contiguous with another
2610 * delayed allocation.
2611 * Insert a new entry.
2613 oldlen = newlen = 0;
2614 xfs_iext_insert(ip, icur, new, state);
2617 if (oldlen != newlen) {
2618 ASSERT(oldlen > newlen);
2619 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2622 * Nothing to do for disk quota accounting here.
2624 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
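/*
 * Rough worked example for the give-back above (numbers invented): if
 * the separate delalloc pieces had reserved a combined oldlen of 8
 * worst-case indirect blocks and the merged extent only needs
 * newlen == 5, the surplus 3 blocks are returned to the free space
 * counters and the per-fs delalloc total shrinks to match. Because
 * newlen is clamped to no more than oldlen when it is computed, the
 * reservation can only ever shrink here.
 */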
2629 * Convert a hole to a real allocation.
2631 STATIC int /* error */
2632 xfs_bmap_add_extent_hole_real(
2633 struct xfs_trans *tp,
2634 struct xfs_inode *ip,
2636 struct xfs_iext_cursor *icur,
2637 struct xfs_btree_cur **curp,
2638 struct xfs_bmbt_irec *new,
2642 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
2643 struct xfs_mount *mp = ip->i_mount;
2644 struct xfs_btree_cur *cur = *curp;
2645 int error; /* error return value */
2646 int i; /* temp state */
2647 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2648 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2649 int rval=0; /* return value (logging flags) */
2650 uint32_t state = xfs_bmap_fork_to_state(whichfork);
2651 struct xfs_bmbt_irec old;
2653 ASSERT(!isnullstartblock(new->br_startblock));
2654 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
2656 XFS_STATS_INC(mp, xs_add_exlist);
2659 * Check and set flags if this segment has a left neighbor.
2661 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2662 state |= BMAP_LEFT_VALID;
2663 if (isnullstartblock(left.br_startblock))
2664 state |= BMAP_LEFT_DELAY;
2668 * Check and set flags if this segment has a current value.
2669 * Not true if we're inserting into the "hole" at eof.
2671 if (xfs_iext_get_extent(ifp, icur, &right)) {
2672 state |= BMAP_RIGHT_VALID;
2673 if (isnullstartblock(right.br_startblock))
2674 state |= BMAP_RIGHT_DELAY;
2678 * We're inserting a real allocation between "left" and "right".
2679 * Set the contiguity flags. Don't let extents get too large.
2681 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2682 left.br_startoff + left.br_blockcount == new->br_startoff &&
2683 left.br_startblock + left.br_blockcount == new->br_startblock &&
2684 left.br_state == new->br_state &&
2685 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2686 state |= BMAP_LEFT_CONTIG;
2688 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2689 new->br_startoff + new->br_blockcount == right.br_startoff &&
2690 new->br_startblock + new->br_blockcount == right.br_startblock &&
2691 new->br_state == right.br_state &&
2692 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2693 (!(state & BMAP_LEFT_CONTIG) ||
2694 left.br_blockcount + new->br_blockcount +
2695 right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
2696 state |= BMAP_RIGHT_CONTIG;
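/*
 * Illustrative numbers for the checks above (not from the source): with
 * a left neighbour covering file offsets [10, 20) at disk block 100, a
 * new extent [20, 25) at disk block 110 is left-contiguous only because
 * both the file offsets (10 + 10 == 20) and the disk blocks
 * (100 + 10 == 110) line up and the extent states match; the combined
 * length must also stay at or below XFS_MAX_BMBT_EXTLEN.
 */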
2700 * Select which case we're in here, and implement it.
2702 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2703 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2705 * New allocation is contiguous with real allocations on the
2706 * left and on the right.
2707 * Merge all three into a single extent record.
2709 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2711 xfs_iext_remove(ip, icur, state);
2712 xfs_iext_prev(ifp, icur);
2713 xfs_iext_update_extent(ip, state, icur, &left);
2717 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2719 rval = XFS_ILOG_CORE;
2720 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2723 if (XFS_IS_CORRUPT(mp, i != 1)) {
2724 error = -EFSCORRUPTED;
2727 error = xfs_btree_delete(cur, &i);
2730 if (XFS_IS_CORRUPT(mp, i != 1)) {
2731 error = -EFSCORRUPTED;
2734 error = xfs_btree_decrement(cur, 0, &i);
2737 if (XFS_IS_CORRUPT(mp, i != 1)) {
2738 error = -EFSCORRUPTED;
2741 error = xfs_bmbt_update(cur, &left);
2747 case BMAP_LEFT_CONTIG:
2749 * New allocation is contiguous with a real allocation
2750 * on the left.
2751 * Merge the new allocation with the left neighbor.
2754 left.br_blockcount += new->br_blockcount;
2756 xfs_iext_prev(ifp, icur);
2757 xfs_iext_update_extent(ip, state, icur, &left);
2760 rval = xfs_ilog_fext(whichfork);
2763 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2766 if (XFS_IS_CORRUPT(mp, i != 1)) {
2767 error = -EFSCORRUPTED;
2770 error = xfs_bmbt_update(cur, &left);
2776 case BMAP_RIGHT_CONTIG:
2778 * New allocation is contiguous with a real allocation
2779 * on the right.
2780 * Merge the new allocation with the right neighbor.
2784 right.br_startoff = new->br_startoff;
2785 right.br_startblock = new->br_startblock;
2786 right.br_blockcount += new->br_blockcount;
2787 xfs_iext_update_extent(ip, state, icur, &right);
2790 rval = xfs_ilog_fext(whichfork);
2793 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2796 if (XFS_IS_CORRUPT(mp, i != 1)) {
2797 error = -EFSCORRUPTED;
2800 error = xfs_bmbt_update(cur, &right);
2808 * New allocation is not contiguous with another
2809 * real allocation.
2810 * Insert a new entry.
2812 xfs_iext_insert(ip, icur, new, state);
2816 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2818 rval = XFS_ILOG_CORE;
2819 error = xfs_bmbt_lookup_eq(cur, new, &i);
2822 if (XFS_IS_CORRUPT(mp, i != 0)) {
2823 error = -EFSCORRUPTED;
2826 error = xfs_btree_insert(cur, &i);
2829 if (XFS_IS_CORRUPT(mp, i != 1)) {
2830 error = -EFSCORRUPTED;
2837 /* add reverse mapping unless caller opted out */
2838 if (!(flags & XFS_BMAPI_NORMAP))
2839 xfs_rmap_map_extent(tp, ip, whichfork, new);
2841 /* convert to a btree if necessary */
2842 if (xfs_bmap_needs_btree(ip, whichfork)) {
2843 int tmp_logflags; /* partial log flag return val */
2845 ASSERT(cur == NULL);
2846 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2847 &tmp_logflags, whichfork);
2848 *logflagsp |= tmp_logflags;
2854 /* clear out the allocated field, done with it now in any case. */
2856 cur->bc_ino.allocated = 0;
2858 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2865 * Functions used in the extent read, allocate and remove paths
2869 * Adjust the size of the new extent based on i_extsize and rt extsize.
2872 xfs_bmap_extsize_align(
2874 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2875 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2876 xfs_extlen_t extsz, /* align to this extent size */
2877 int rt, /* is this a realtime inode? */
2878 int eof, /* is extent at end-of-file? */
2879 int delay, /* creating delalloc extent? */
2880 int convert, /* overwriting unwritten extent? */
2881 xfs_fileoff_t *offp, /* in/out: aligned offset */
2882 xfs_extlen_t *lenp) /* in/out: aligned length */
2884 xfs_fileoff_t orig_off; /* original offset */
2885 xfs_extlen_t orig_alen; /* original length */
2886 xfs_fileoff_t orig_end; /* original off+len */
2887 xfs_fileoff_t nexto; /* next file offset */
2888 xfs_fileoff_t prevo; /* previous file offset */
2889 xfs_fileoff_t align_off; /* temp for offset */
2890 xfs_extlen_t align_alen; /* temp for length */
2891 xfs_extlen_t temp; /* temp for calculations */
2896 orig_off = align_off = *offp;
2897 orig_alen = align_alen = *lenp;
2898 orig_end = orig_off + orig_alen;
2901 * If this request overlaps an existing extent, then don't
2902 * attempt to perform any additional alignment.
2904 if (!delay && !eof &&
2905 (orig_off >= gotp->br_startoff) &&
2906 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2911 * If the file offset is unaligned vs. the extent size
2912 * we need to align it. This will be possible unless
2913 * the file was previously written with a kernel that didn't
2914 * perform this alignment, or if a truncate shot us in the
2915 * foot.
2917 div_u64_rem(orig_off, extsz, &temp);
2923 /* Same adjustment for the end of the requested area. */
2924 temp = (align_alen % extsz);
2926 align_alen += extsz - temp;
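/*
 * Example of the rounding above (illustrative values only): with
 * extsz = 16, a request at offset 5 for 20 blocks first has its start
 * pulled back to offset 0 (growing the length to 25), then its length
 * rounded up by 7 to 32, so the aligned range [0, 32) fully covers the
 * original [5, 25).
 */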
2929 * For large extent hint sizes, the aligned extent might be larger than
2930 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
2931 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
2932 * allocation loops handle short allocation just fine, so it is safe to
2933 * do this. We only want to do it when we are forced to, though, because
2934 * it means more allocation operations are required.
2936 while (align_alen > XFS_MAX_BMBT_EXTLEN)
2937 align_alen -= extsz;
2938 ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
2941 * If the previous block overlaps with this proposed allocation
2942 * then move the start forward without adjusting the length.
2944 if (prevp->br_startoff != NULLFILEOFF) {
2945 if (prevp->br_startblock == HOLESTARTBLOCK)
2946 prevo = prevp->br_startoff;
2948 prevo = prevp->br_startoff + prevp->br_blockcount;
2951 if (align_off != orig_off && align_off < prevo)
2954 * If the next block overlaps with this proposed allocation
2955 * then move the start back without adjusting the length,
2956 * but not before offset 0.
2957 * This may of course make the start overlap previous block,
2958 * and if we hit the offset 0 limit then the next block
2959 * can still overlap too.
2961 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2962 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2963 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2964 nexto = gotp->br_startoff + gotp->br_blockcount;
2966 nexto = gotp->br_startoff;
2968 nexto = NULLFILEOFF;
2970 align_off + align_alen != orig_end &&
2971 align_off + align_alen > nexto)
2972 align_off = nexto > align_alen ? nexto - align_alen : 0;
2974 * If we're now overlapping the next or previous extent that
2975 * means we can't fit an extsz piece in this hole. Just move
2976 * the start forward to the first valid spot and set
2977 * the length so we hit the end.
2979 if (align_off != orig_off && align_off < prevo)
2981 if (align_off + align_alen != orig_end &&
2982 align_off + align_alen > nexto &&
2983 nexto != NULLFILEOFF) {
2984 ASSERT(nexto > prevo);
2985 align_alen = nexto - align_off;
2989 * If realtime, and the result isn't a multiple of the realtime
2990 * extent size we need to remove blocks until it is.
2992 if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
2994 * We're not covering the original request, or
2995 * we won't be able to once we fix the length.
2997 if (orig_off < align_off ||
2998 orig_end > align_off + align_alen ||
2999 align_alen - temp < orig_alen)
3002 * Try to fix it by moving the start up.
3004 if (align_off + temp <= orig_off) {
3009 * Try to fix it by moving the end in.
3011 else if (align_off + align_alen - temp >= orig_end)
3014 * Set the start to the minimum then trim the length.
3017 align_alen -= orig_off - align_off;
3018 align_off = orig_off;
3019 align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3022 * Result doesn't cover the request, fail it.
3024 if (orig_off < align_off || orig_end > align_off + align_alen)
3027 ASSERT(orig_off >= align_off);
3028 /* see XFS_MAX_BMBT_EXTLEN handling above */
3029 ASSERT(orig_end <= align_off + align_alen ||
3030 align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3034 if (!eof && gotp->br_startoff != NULLFILEOFF)
3035 ASSERT(align_off + align_alen <= gotp->br_startoff);
3036 if (prevp->br_startoff != NULLFILEOFF)
3037 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3045 #define XFS_ALLOC_GAP_UNITS 4
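/*
 * Loosely, XFS_ALLOC_GAP_UNITS caps how large a file-offset gap may be,
 * relative to the size of the allocation being made, before the
 * adjacency code below stops trying to mirror that gap on disk. With
 * invented numbers: an 8 block allocation with a 20 block gap to its
 * neighbour still targets "neighbour end + gap" because 20 <= 4 * 8,
 * while a 40 block gap just targets the block after the neighbour.
 */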
3047 /* returns true if ap->blkno was modified */
3048 static bool
3049 xfs_bmap_adjacent(
3050 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3052 xfs_fsblock_t adjust; /* adjustment to block numbers */
3053 xfs_mount_t *mp; /* mount point structure */
3054 int rt; /* true if inode is realtime */
3056 #define ISVALID(x,y) \
3058 (x) < mp->m_sb.sb_rblocks : \
3059 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3060 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3061 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3063 mp = ap->ip->i_mount;
3064 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3065 (ap->datatype & XFS_ALLOC_USERDATA);
3067 * If allocating at eof, and there's a previous real block,
3068 * try to use its last block as our starting point.
3070 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3071 !isnullstartblock(ap->prev.br_startblock) &&
3072 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3073 ap->prev.br_startblock)) {
3074 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3076 * Adjust for the gap between prevp and us.
3078 adjust = ap->offset -
3079 (ap->prev.br_startoff + ap->prev.br_blockcount);
3081 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3082 ap->blkno += adjust;
3086 * If not at eof, then compare the two neighbor blocks.
3087 * Figure out whether either one gives us a good starting point,
3088 * and pick the better one.
3091 xfs_fsblock_t gotbno; /* right side block number */
3092 xfs_fsblock_t gotdiff=0; /* right side difference */
3093 xfs_fsblock_t prevbno; /* left side block number */
3094 xfs_fsblock_t prevdiff=0; /* left side difference */
3097 * If there's a previous (left) block, select a requested
3098 * start block based on it.
3100 if (ap->prev.br_startoff != NULLFILEOFF &&
3101 !isnullstartblock(ap->prev.br_startblock) &&
3102 (prevbno = ap->prev.br_startblock +
3103 ap->prev.br_blockcount) &&
3104 ISVALID(prevbno, ap->prev.br_startblock)) {
3106 * Calculate gap to end of previous block.
3108 adjust = prevdiff = ap->offset -
3109 (ap->prev.br_startoff +
3110 ap->prev.br_blockcount);
3112 * Figure the startblock based on the previous block's
3113 * end and the gap size.
3115 * If the gap is large relative to the piece we're
3116 * allocating, or using it gives us an invalid block
3117 * number, then just use the end of the previous block.
3119 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3120 ISVALID(prevbno + prevdiff,
3121 ap->prev.br_startblock))
3127 * No previous block or can't follow it, just default.
3130 prevbno = NULLFSBLOCK;
3132 * If there's a following (right) block, select a requested
3133 * start block based on it.
3135 if (!isnullstartblock(ap->got.br_startblock)) {
3137 * Calculate gap to start of next block.
3139 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3141 * Figure the startblock based on the next block's
3142 * start and the gap size.
3144 gotbno = ap->got.br_startblock;
3147 * If the gap is large relative to the piece we're
3148 * allocating, or using it gives us an invalid block
3149 * number, then just use the start of the next block
3150 * offset by our length.
3152 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3153 ISVALID(gotbno - gotdiff, gotbno))
3155 else if (ISVALID(gotbno - ap->length, gotbno)) {
3156 gotbno -= ap->length;
3157 gotdiff += adjust - ap->length;
3162 * No next block, just default.
3165 gotbno = NULLFSBLOCK;
3167 * If both valid, pick the better one, else the only good
3168 * one, else ap->blkno is already set (to 0 or the inode block).
3170 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3171 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3174 if (prevbno != NULLFSBLOCK) {
3175 ap->blkno = prevbno;
3178 if (gotbno != NULLFSBLOCK) {
3188 xfs_bmap_longest_free_extent(
3189 struct xfs_perag *pag,
3190 struct xfs_trans *tp,
3193 xfs_extlen_t longest;
3196 if (!xfs_perag_initialised_agf(pag)) {
3197 error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3203 longest = xfs_alloc_longest_free_extent(pag,
3204 xfs_alloc_min_freelist(pag->pag_mount, pag),
3205 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3206 if (*blen < longest)
3213 xfs_bmap_select_minlen(
3214 struct xfs_bmalloca *ap,
3215 struct xfs_alloc_arg *args,
3220 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is
3221 * possible that there is enough contiguous free space for this request.
3223 if (blen < ap->minlen)
3227 * If the best seen length is less than the request length,
3228 * use the best as the minimum, otherwise we've got the maxlen we
3229 * need.
3231 if (blen < args->maxlen)
3232 return blen;
3233 return args->maxlen;
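/*
 * Quick illustration of the choice above (values made up): with
 * ap->minlen == 1 and args->maxlen == 100, a best-seen free extent of
 * 40 blocks makes the minimum length 40 (ask for at least the largest
 * run we know about), a best-seen run of 150 blocks just returns
 * maxlen, and only when even the best run is below ap->minlen do we
 * fall back to the caller's minimum.
 */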
3237 xfs_bmap_btalloc_select_lengths(
3238 struct xfs_bmalloca *ap,
3239 struct xfs_alloc_arg *args,
3242 struct xfs_mount *mp = args->mp;
3243 struct xfs_perag *pag;
3244 xfs_agnumber_t agno, startag;
3247 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3248 args->total = ap->minlen;
3249 args->minlen = ap->minlen;
3253 args->total = ap->total;
3254 startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3255 if (startag == NULLAGNUMBER)
3259 for_each_perag_wrap(mp, startag, agno, pag) {
3260 error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3261 if (error && error != -EAGAIN)
3264 if (*blen >= args->maxlen)
3268 xfs_perag_rele(pag);
3270 args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3274 /* Update all inode and quota accounting for the allocation we just did. */
3276 xfs_bmap_alloc_account(
3277 struct xfs_bmalloca *ap)
3279 bool isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3280 !(ap->flags & XFS_BMAPI_ATTRFORK);
3283 if (ap->flags & XFS_BMAPI_COWFORK) {
3285 * COW fork blocks are in-core only and thus are treated as
3286 * in-core quota reservation (like delalloc blocks) even when
3287 * converted to real blocks. The quota reservation is not
3288 * accounted to disk until blocks are remapped to the data
3289 * fork. So if these blocks were previously delalloc, we
3290 * already have quota reservation and there's nothing to do
3291 * yet.
3293 if (ap->wasdel) {
3294 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
3299 * Otherwise, we've allocated blocks in a hole. The transaction
3300 * has acquired in-core quota reservation for this extent.
3301 * Rather than account these as real blocks, however, we reduce
3302 * the transaction quota reservation based on the allocation.
3303 * This essentially transfers the transaction quota reservation
3304 * to that of a delalloc extent.
3306 ap->ip->i_delayed_blks += ap->length;
3307 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3308 XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3313 /* data/attr fork only */
3314 ap->ip->i_nblocks += ap->length;
3315 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3316 if (ap->wasdel) {
3317 ap->ip->i_delayed_blks -= ap->length;
3318 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
3319 fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3320 } else {
3321 fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3324 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3328 xfs_bmap_compute_alignments(
3329 struct xfs_bmalloca *ap,
3330 struct xfs_alloc_arg *args)
3332 struct xfs_mount *mp = args->mp;
3333 xfs_extlen_t align = 0; /* minimum allocation alignment */
3334 int stripe_align = 0;
3336 /* stripe alignment for allocation is determined by mount parameters */
3337 if (mp->m_swidth && xfs_has_swalloc(mp))
3338 stripe_align = mp->m_swidth;
3339 else if (mp->m_dalign)
3340 stripe_align = mp->m_dalign;
3342 if (ap->flags & XFS_BMAPI_COWFORK)
3343 align = xfs_get_cowextsz_hint(ap->ip);
3344 else if (ap->datatype & XFS_ALLOC_USERDATA)
3345 align = xfs_get_extsz_hint(ap->ip);
3347 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3348 ap->eof, 0, ap->conv, &ap->offset,
3354 /* apply extent size hints if obtained earlier */
3357 div_u64_rem(ap->offset, args->prod, &args->mod);
3359 args->mod = args->prod - args->mod;
3360 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3364 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3365 div_u64_rem(ap->offset, args->prod, &args->mod);
3367 args->mod = args->prod - args->mod;
3370 return stripe_align;
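/*
 * A sketch of how prod/mod are meant to be read (based on how the
 * allocator consumes them rather than anything spelled out here):
 * prod is the alignment unit in filesystem blocks and mod is the
 * remainder the allocated length should have so that offset + length
 * lands on a prod boundary. For example, with 1k blocks and 4k pages
 * prod is 4; a request at file block 6 gets mod = 2, nudging the
 * allocator towards lengths of 2, 6, 10, ... blocks so the mapping
 * ends page aligned. Treat the numbers as illustrative only.
 */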
3374 xfs_bmap_process_allocated_extent(
3375 struct xfs_bmalloca *ap,
3376 struct xfs_alloc_arg *args,
3377 xfs_fileoff_t orig_offset,
3378 xfs_extlen_t orig_length)
3380 ap->blkno = args->fsbno;
3381 ap->length = args->len;
3383 * If the extent size hint is active, we tried to round the
3384 * caller's allocation request offset down to extsz and the
3385 * length up to another extsz boundary. If we found a free
3386 * extent we mapped it in starting at this new offset. If the
3387 * newly mapped space isn't long enough to cover any of the
3388 * range of offsets that was originally requested, move the
3389 * mapping up so that we can fill as much of the caller's
3390 * original request as possible. Free space is apparently
3391 * very fragmented so we're unlikely to be able to satisfy the
3394 if (ap->length <= orig_length)
3395 ap->offset = orig_offset;
3396 else if (ap->offset + ap->length < orig_offset + orig_length)
3397 ap->offset = orig_offset + orig_length - ap->length;
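/*
 * Illustrative case for the fixup above (numbers invented): the caller
 * asked for [10, 14) and the extent size hint widened the attempt to
 * [8, 16). If the allocator returned only 4 blocks they are mapped at
 * offset 10, covering the original range exactly; if it returned 5
 * blocks, the provisional mapping [8, 13) would stop short of 14, so
 * the start is slid up to 9 and the mapping ends at the caller's end
 * offset.
 */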
3398 xfs_bmap_alloc_account(ap);
3403 xfs_bmap_exact_minlen_extent_alloc(
3404 struct xfs_bmalloca *ap)
3406 struct xfs_mount *mp = ap->ip->i_mount;
3407 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3408 xfs_fileoff_t orig_offset;
3409 xfs_extlen_t orig_length;
3414 if (ap->minlen != 1) {
3415 ap->blkno = NULLFSBLOCK;
3420 orig_offset = ap->offset;
3421 orig_length = ap->length;
3423 args.alloc_minlen_only = 1;
3425 xfs_bmap_compute_alignments(ap, &args);
3428 * Unlike the longest extent available in an AG, we don't track
3429 * the length of an AG's shortest extent.
3430 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3431 * hence we can afford to start traversing from the 0th AG since
3432 * we need not be concerned about a drop in performance in
3433 * "debug only" code paths.
3435 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3437 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3438 args.minlen = args.maxlen = ap->minlen;
3439 args.total = ap->total;
3442 args.minalignslop = 0;
3444 args.minleft = ap->minleft;
3445 args.wasdel = ap->wasdel;
3446 args.resv = XFS_AG_RESV_NONE;
3447 args.datatype = ap->datatype;
3449 error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
3453 if (args.fsbno != NULLFSBLOCK) {
3454 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3457 ap->blkno = NULLFSBLOCK;
3465 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3470 * If we are not low on available data blocks and we are allocating at
3471 * EOF, optimise allocation for contiguous file extension and/or stripe
3472 * alignment of the new extent.
3474 * NOTE: ap->aeof is only set if the allocation length is >= the
3475 * stripe unit and the allocation offset is at the end of file.
3478 xfs_bmap_btalloc_at_eof(
3479 struct xfs_bmalloca *ap,
3480 struct xfs_alloc_arg *args,
3485 struct xfs_mount *mp = args->mp;
3486 struct xfs_perag *caller_pag = args->pag;
3490 * If there are already extents in the file, try an exact EOF block
3491 * allocation to extend the file as a contiguous extent. If that fails,
3492 * or it's the first allocation in a file, just try for a stripe aligned
3493 * allocation.
3495 if (ap->eof) {
3496 xfs_extlen_t nextminlen = 0;
3499 * Compute the minlen+alignment for the next case. Set slop so
3500 * that the value of minlen+alignment+slop doesn't go up between
3501 * the calls.
3503 args->alignment = 1;
3504 if (blen > stripe_align && blen <= args->maxlen)
3505 nextminlen = blen - stripe_align;
3507 nextminlen = args->minlen;
3508 if (nextminlen + stripe_align > args->minlen + 1)
3509 args->minalignslop = nextminlen + stripe_align -
3512 args->minalignslop = 0;
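/*
 * Loosely, minalignslop pads the space requirement of this unaligned
 * attempt so that a later stripe aligned retry cannot need more free
 * space than was checked for here. Sketch with invented numbers:
 * stripe_align == 16 and nextminlen == 1 give minalignslop == 15, i.e.
 * headroom for up to 15 blocks of alignment padding on the retry.
 */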
3515 args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3516 error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3518 xfs_perag_put(args->pag);
3524 if (args->fsbno != NULLFSBLOCK)
3527 * Exact allocation failed. Reset to try an aligned allocation
3528 * according to the original allocation specification.
3530 args->alignment = stripe_align;
3531 args->minlen = nextminlen;
3532 args->minalignslop = 0;
3535 * Adjust minlen to try and preserve alignment if we
3536 * can't guarantee an aligned maxlen extent.
3538 args->alignment = stripe_align;
3539 if (blen > args->alignment &&
3540 blen <= args->maxlen + args->alignment)
3541 args->minlen = blen - args->alignment;
3542 args->minalignslop = 0;
3546 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3549 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3550 ASSERT(args->pag == NULL);
3551 args->pag = caller_pag;
3556 if (args->fsbno != NULLFSBLOCK)
3560 * Allocation failed, so restore the allocation args to their
3561 * original non-aligned state so the caller can proceed on allocation
3562 * failure as if this function was never called.
3564 args->alignment = 1;
3569 * We have failed multiple allocation attempts so now are in a low space
3570 * allocation situation. Try a locality first full filesystem minimum length
3571 * allocation whilst still maintaining necessary total block reservation
3574 * If that fails, we are now critically low on space, so perform a last resort
3575 * allocation attempt: no reserve, no locality, blocking, minimum length, full
3576 * filesystem free space scan. We also indicate to future allocations in this
3577 * transaction that we are critically low on space so they don't waste time on
3578 * allocation modes that are unlikely to succeed.
3581 xfs_bmap_btalloc_low_space(
3582 struct xfs_bmalloca *ap,
3583 struct xfs_alloc_arg *args)
3587 if (args->minlen > ap->minlen) {
3588 args->minlen = ap->minlen;
3589 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3590 if (error || args->fsbno != NULLFSBLOCK)
3594 /* Last ditch attempt before failure is declared. */
3595 args->total = ap->minlen;
3596 error = xfs_alloc_vextent_first_ag(args, 0);
3599 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3604 xfs_bmap_btalloc_filestreams(
3605 struct xfs_bmalloca *ap,
3606 struct xfs_alloc_arg *args,
3609 xfs_extlen_t blen = 0;
3613 error = xfs_filestream_select_ag(ap, args, &blen);
3619 * If we are in low space mode, then optimal allocation will fail so
3620 * prepare for minimal allocation and jump to the low space algorithm
3623 if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3624 args->minlen = ap->minlen;
3625 ASSERT(args->fsbno == NULLFSBLOCK);
3629 args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3631 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3634 if (!error && args->fsbno == NULLFSBLOCK)
3635 error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3639 * We are now done with the perag reference for the filestreams
3640 * association provided by xfs_filestream_select_ag(). Release it now as
3641 * we've either succeeded, had a fatal error or we are out of space and
3642 * need to do a full filesystem scan for free space which will take its
3643 * own references.
3645 xfs_perag_rele(args->pag);
3647 if (error || args->fsbno != NULLFSBLOCK)
3650 return xfs_bmap_btalloc_low_space(ap, args);
3654 xfs_bmap_btalloc_best_length(
3655 struct xfs_bmalloca *ap,
3656 struct xfs_alloc_arg *args,
3659 xfs_extlen_t blen = 0;
3662 ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3663 xfs_bmap_adjacent(ap);
3666 * Search for an allocation group with a single extent large enough for
3667 * the request. If one isn't found, then adjust the minimum allocation
3668 * size to the largest space found.
3670 error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3675 * Don't attempt optimal EOF allocation if previous allocations barely
3676 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3677 * optimal or even aligned allocations in this case, so don't waste time
3678 * trying.
3680 if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3681 error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3683 if (error || args->fsbno != NULLFSBLOCK)
3687 error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3688 if (error || args->fsbno != NULLFSBLOCK)
3691 return xfs_bmap_btalloc_low_space(ap, args);
3694 static int
3695 xfs_bmap_btalloc(
3696 struct xfs_bmalloca *ap)
3698 struct xfs_mount *mp = ap->ip->i_mount;
3699 struct xfs_alloc_arg args = {
3702 .fsbno = NULLFSBLOCK,
3703 .oinfo = XFS_RMAP_OINFO_SKIP_UPDATE,
3704 .minleft = ap->minleft,
3705 .wasdel = ap->wasdel,
3706 .resv = XFS_AG_RESV_NONE,
3707 .datatype = ap->datatype,
3711 xfs_fileoff_t orig_offset;
3712 xfs_extlen_t orig_length;
3717 orig_offset = ap->offset;
3718 orig_length = ap->length;
3720 stripe_align = xfs_bmap_compute_alignments(ap, &args);
3722 /* Trim the allocation back to the maximum an AG can fit. */
3723 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3725 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3726 xfs_inode_is_filestream(ap->ip))
3727 error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3729 error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3733 if (args.fsbno != NULLFSBLOCK) {
3734 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3737 ap->blkno = NULLFSBLOCK;
3743 /* Trim extent to fit a logical block range. */
3746 struct xfs_bmbt_irec *irec,
3750 xfs_fileoff_t distance;
3751 xfs_fileoff_t end = bno + len;
3753 if (irec->br_startoff + irec->br_blockcount <= bno ||
3754 irec->br_startoff >= end) {
3755 irec->br_blockcount = 0;
3759 if (irec->br_startoff < bno) {
3760 distance = bno - irec->br_startoff;
3761 if (isnullstartblock(irec->br_startblock))
3762 irec->br_startblock = DELAYSTARTBLOCK;
3763 if (irec->br_startblock != DELAYSTARTBLOCK &&
3764 irec->br_startblock != HOLESTARTBLOCK)
3765 irec->br_startblock += distance;
3766 irec->br_startoff += distance;
3767 irec->br_blockcount -= distance;
3770 if (end < irec->br_startoff + irec->br_blockcount) {
3771 distance = irec->br_startoff + irec->br_blockcount - end;
3772 irec->br_blockcount -= distance;
3777 * Trim the returned map to the required bounds
3781 struct xfs_bmbt_irec *mval,
3782 struct xfs_bmbt_irec *got,
3790 if ((flags & XFS_BMAPI_ENTIRE) ||
3791 got->br_startoff + got->br_blockcount <= obno) {
3793 if (isnullstartblock(got->br_startblock))
3794 mval->br_startblock = DELAYSTARTBLOCK;
3800 ASSERT((*bno >= obno) || (n == 0));
3802 mval->br_startoff = *bno;
3803 if (isnullstartblock(got->br_startblock))
3804 mval->br_startblock = DELAYSTARTBLOCK;
3806 mval->br_startblock = got->br_startblock +
3807 (*bno - got->br_startoff);
3809 * Return the minimum of what we got and what we asked for for
3810 * the length. We can use the len variable here because it is
3811 * modified below and we could have been there before coming
3812 * here if the first part of the allocation didn't overlap what
3813 * was asked for.
3815 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3816 got->br_blockcount - (*bno - got->br_startoff));
3817 mval->br_state = got->br_state;
3818 ASSERT(mval->br_blockcount <= len);
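/*
 * Example of the trimming above (made-up values): with got covering
 * file blocks [100, 200) starting at disk block 500, a request for
 * bno 120 and len 50 yields a mapping at offset 120, disk block 520,
 * length 50; a delalloc record would report DELAYSTARTBLOCK instead of
 * a real disk block.
 */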
3823 * Update and validate the extent map to return
3826 xfs_bmapi_update_map(
3827 struct xfs_bmbt_irec **map,
3835 xfs_bmbt_irec_t *mval = *map;
3837 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3838 ((mval->br_startoff + mval->br_blockcount) <= end));
3839 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3840 (mval->br_startoff < obno));
3842 *bno = mval->br_startoff + mval->br_blockcount;
3844 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3845 /* update previous map with new information */
3846 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3847 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3848 ASSERT(mval->br_state == mval[-1].br_state);
3849 mval[-1].br_blockcount = mval->br_blockcount;
3850 mval[-1].br_state = mval->br_state;
3851 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3852 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3853 mval[-1].br_startblock != HOLESTARTBLOCK &&
3854 mval->br_startblock == mval[-1].br_startblock +
3855 mval[-1].br_blockcount &&
3856 mval[-1].br_state == mval->br_state) {
3857 ASSERT(mval->br_startoff ==
3858 mval[-1].br_startoff + mval[-1].br_blockcount);
3859 mval[-1].br_blockcount += mval->br_blockcount;
3860 } else if (*n > 0 &&
3861 mval->br_startblock == DELAYSTARTBLOCK &&
3862 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3863 mval->br_startoff ==
3864 mval[-1].br_startoff + mval[-1].br_blockcount) {
3865 mval[-1].br_blockcount += mval->br_blockcount;
3866 mval[-1].br_state = mval->br_state;
3867 } else if (!((*n == 0) &&
3868 ((mval->br_startoff + mval->br_blockcount) <=
3877 * Map file blocks to filesystem blocks without allocation.
3879 int
3880 xfs_bmapi_read(
3881 struct xfs_inode *ip,
3884 struct xfs_bmbt_irec *mval,
3888 struct xfs_mount *mp = ip->i_mount;
3889 int whichfork = xfs_bmapi_whichfork(flags);
3890 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3891 struct xfs_bmbt_irec got;
3894 struct xfs_iext_cursor icur;
3900 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3901 xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3903 if (WARN_ON_ONCE(!ifp))
3904 return -EFSCORRUPTED;
3906 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3907 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
3908 return -EFSCORRUPTED;
3910 if (xfs_is_shutdown(mp))
3913 XFS_STATS_INC(mp, xs_blk_mapr);
3915 error = xfs_iread_extents(NULL, ip, whichfork);
3919 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3924 while (bno < end && n < *nmap) {
3925 /* Reading past eof, act as though there's a hole up to end. */
3927 got.br_startoff = end;
3928 if (got.br_startoff > bno) {
3929 /* Reading in a hole. */
3930 mval->br_startoff = bno;
3931 mval->br_startblock = HOLESTARTBLOCK;
3932 mval->br_blockcount =
3933 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3934 mval->br_state = XFS_EXT_NORM;
3935 bno += mval->br_blockcount;
3936 len -= mval->br_blockcount;
3942 /* set up the extent map to return. */
3943 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3944 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3946 /* If we're done, stop now. */
3947 if (bno >= end || n >= *nmap)
3950 /* Else go on to the next record. */
3951 if (!xfs_iext_next_extent(ifp, &icur, &got))
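/*
 * Hypothetical caller sketch (not part of this file) showing the usual
 * calling convention: the inode must already be ilocked by the caller,
 * and nmap is both the size of the map array on entry and the number
 * of mappings filled in on return.
 *
 *	struct xfs_bmbt_irec	map[4];
 *	int			nmap = ARRAY_SIZE(map);
 *	int			error;
 *
 *	error = xfs_bmapi_read(ip, 0, 16, map, &nmap, 0);
 */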
3959 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3960 * global pool and the extent inserted into the inode in-core extent tree.
3962 * On entry, got refers to the first extent beyond the offset of the extent to
3963 * allocate or eof is specified if no such extent exists. On return, got refers
3964 * to the extent record that was inserted to the inode fork.
3966 * Note that the allocated extent may have been merged with contiguous extents
3967 * during insertion into the inode fork. Thus, got does not reflect the current
3968 * state of the inode fork on return. If necessary, the caller can use lastx to
3969 * look up the updated record in the inode fork.
3972 xfs_bmapi_reserve_delalloc(
3973 struct xfs_inode *ip,
3977 xfs_filblks_t prealloc,
3978 struct xfs_bmbt_irec *got,
3979 struct xfs_iext_cursor *icur,
3982 struct xfs_mount *mp = ip->i_mount;
3983 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
3985 xfs_extlen_t indlen;
3987 xfs_fileoff_t aoff = off;
3990 * Cap the alloc length. Keep track of prealloc so we know whether to
3991 * tag the inode before we return.
3993 alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
3995 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3996 if (prealloc && alen >= len)
3997 prealloc = alen - len;
3999 /* Figure out the extent size, adjust alen */
4000 if (whichfork == XFS_COW_FORK) {
4001 struct xfs_bmbt_irec prev;
4002 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
4004 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4005 prev.br_startoff = NULLFILEOFF;
4007 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4008 1, 0, &aoff, &alen);
4013 * Make a transaction-less quota reservation for delayed allocation
4014 * blocks. This number gets adjusted later. We return if we haven't
4015 * allocated blocks already inside this loop.
4017 error = xfs_quota_reserve_blkres(ip, alen);
4022 * Split changing sb for alen and indlen since they could be coming
4023 * from different places.
4025 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4028 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4030 goto out_unreserve_quota;
4032 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4034 goto out_unreserve_blocks;
4037 ip->i_delayed_blks += alen;
4038 xfs_mod_delalloc(ip->i_mount, alen + indlen);
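/*
 * Small worked example for the two debits above (numbers invented):
 * reserving a 100 block delalloc extent that might also need 3 blocks
 * of worst-case indirect (bmbt) space debits fdblocks by 100 and then
 * by 3 in separate steps and grows the per-fs delalloc counter by 103;
 * whatever was already taken is handed back by the error labels below
 * if a later step fails.
 */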
4040 got->br_startoff = aoff;
4041 got->br_startblock = nullstartblock(indlen);
4042 got->br_blockcount = alen;
4043 got->br_state = XFS_EXT_NORM;
4045 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4048 * Tag the inode if blocks were preallocated. Note that COW fork
4049 * preallocation can occur at the start or end of the extent, even when
4050 * prealloc == 0, so we must also check the aligned offset and length.
4052 if (whichfork == XFS_DATA_FORK && prealloc)
4053 xfs_inode_set_eofblocks_tag(ip);
4054 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4055 xfs_inode_set_cowblocks_tag(ip);
4059 out_unreserve_blocks:
4060 xfs_mod_fdblocks(mp, alen, false);
4061 out_unreserve_quota:
4062 if (XFS_IS_QUOTA_ON(mp))
4063 xfs_quota_unreserve_blkres(ip, alen);
4068 xfs_bmap_alloc_userdata(
4069 struct xfs_bmalloca *bma)
4071 struct xfs_mount *mp = bma->ip->i_mount;
4072 int whichfork = xfs_bmapi_whichfork(bma->flags);
4076 * Set the data type being allocated. For the data fork, the first data
4077 * in the file is treated differently to all other allocations. For the
4078 * attribute fork, we only need to ensure the allocated range is not on
4079 * the busy list.
4081 bma->datatype = XFS_ALLOC_NOBUSY;
4082 if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4083 bma->datatype |= XFS_ALLOC_USERDATA;
4084 if (bma->offset == 0)
4085 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4087 if (mp->m_dalign && bma->length >= mp->m_dalign) {
4088 error = xfs_bmap_isaeof(bma, whichfork);
4093 if (XFS_IS_REALTIME_INODE(bma->ip))
4094 return xfs_bmap_rtalloc(bma);
4097 if (unlikely(XFS_TEST_ERROR(false, mp,
4098 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4099 return xfs_bmap_exact_minlen_extent_alloc(bma);
4101 return xfs_bmap_btalloc(bma);
4104 static int
4105 xfs_bmapi_allocate(
4106 struct xfs_bmalloca *bma)
4108 struct xfs_mount *mp = bma->ip->i_mount;
4109 int whichfork = xfs_bmapi_whichfork(bma->flags);
4110 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4111 int tmp_logflags = 0;
4114 ASSERT(bma->length > 0);
4117 * For the wasdelay case, we could also just allocate the stuff asked
4118 * for in this bmap call but that wouldn't be as good.
4121 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4122 bma->offset = bma->got.br_startoff;
4123 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4124 bma->prev.br_startoff = NULLFILEOFF;
4126 bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
4128 bma->length = XFS_FILBLKS_MIN(bma->length,
4129 bma->got.br_startoff - bma->offset);
4132 if (bma->flags & XFS_BMAPI_CONTIG)
4133 bma->minlen = bma->length;
4137 if (bma->flags & XFS_BMAPI_METADATA) {
4138 if (unlikely(XFS_TEST_ERROR(false, mp,
4139 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4140 error = xfs_bmap_exact_minlen_extent_alloc(bma);
4142 error = xfs_bmap_btalloc(bma);
4144 error = xfs_bmap_alloc_userdata(bma);
4146 if (error || bma->blkno == NULLFSBLOCK)
4149 if (bma->flags & XFS_BMAPI_ZERO) {
4150 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4155 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4156 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4158 * Bump the number of extents we've allocated
4164 bma->cur->bc_ino.flags =
4165 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
4167 bma->got.br_startoff = bma->offset;
4168 bma->got.br_startblock = bma->blkno;
4169 bma->got.br_blockcount = bma->length;
4170 bma->got.br_state = XFS_EXT_NORM;
4172 if (bma->flags & XFS_BMAPI_PREALLOC)
4173 bma->got.br_state = XFS_EXT_UNWRITTEN;
4176 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4178 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4179 whichfork, &bma->icur, &bma->cur, &bma->got,
4180 &bma->logflags, bma->flags);
4182 bma->logflags |= tmp_logflags;
4187 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4188 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4189 * the neighbouring ones.
4191 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4193 ASSERT(bma->got.br_startoff <= bma->offset);
4194 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4195 bma->offset + bma->length);
4196 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4197 bma->got.br_state == XFS_EXT_UNWRITTEN);
4202 xfs_bmapi_convert_unwritten(
4203 struct xfs_bmalloca *bma,
4204 struct xfs_bmbt_irec *mval,
4208 int whichfork = xfs_bmapi_whichfork(flags);
4209 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4210 int tmp_logflags = 0;
4213 /* check if we need to do unwritten->real conversion */
4214 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4215 (flags & XFS_BMAPI_PREALLOC))
4218 /* check if we need to do real->unwritten conversion */
4219 if (mval->br_state == XFS_EXT_NORM &&
4220 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4221 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4225 * Modify (by adding) the state flag, if writing.
4227 ASSERT(mval->br_blockcount <= len);
4228 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4229 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4230 bma->ip, whichfork);
4232 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4233 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4236 * Before insertion into the bmbt, zero the range being converted
4237 * if required.
4239 if (flags & XFS_BMAPI_ZERO) {
4240 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4241 mval->br_blockcount);
4246 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4247 &bma->icur, &bma->cur, mval, &tmp_logflags);
4249 * Log the inode core unconditionally in the unwritten extent conversion
4250 * path because the conversion might not have done so (e.g., if the
4251 * extent count hasn't changed). We need to make sure the inode is dirty
4252 * in the transaction for the sake of fsync(), even if nothing has
4253 * changed, because fsync() will not force the log for this transaction
4254 * unless it sees the inode pinned.
4256 * Note: If we're only converting cow fork extents, there aren't
4257 * any on-disk updates to make, so we don't need to log anything.
4259 if (whichfork != XFS_COW_FORK)
4260 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4265 * Update our extent pointer, given that
4266 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4267 * of the neighbouring ones.
4269 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4272 * We may have combined previously unwritten space with written space,
4273 * so generate another request.
4275 if (mval->br_blockcount < len)
4280 static inline xfs_extlen_t
4281 xfs_bmapi_minleft(
4282 struct xfs_trans *tp,
4283 struct xfs_inode *ip,
4286 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork);
4288 if (tp && tp->t_highest_agno != NULLAGNUMBER)
4290 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4292 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
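/*
 * Roughly speaking: leave enough free blocks in the AG to grow the
 * bmbt by one level, i.e. the current root level plus one, and when
 * the fork is not yet in btree format a single block of headroom is
 * enough to cover a possible conversion to btree format.
 */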
4296 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4297 * a case where the data is changed, there's an error, and it's not logged so we
4298 * don't shutdown when we should. Don't bother logging extents/btree changes if
4299 * we converted to the other format.
4303 struct xfs_bmalloca *bma,
4307 struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
4309 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4310 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4311 bma->logflags &= ~xfs_ilog_fext(whichfork);
4312 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4313 ifp->if_format != XFS_DINODE_FMT_BTREE)
4314 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4317 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4319 xfs_btree_del_cursor(bma->cur, error);
4323 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4324 * extent state if necessary. Detailed behaviour is controlled by the flags
4325 * parameter. Only allocates blocks from a single allocation group, to avoid
4326 * locking problems.
4328 int
4329 xfs_bmapi_write(
4330 struct xfs_trans *tp, /* transaction pointer */
4331 struct xfs_inode *ip, /* incore inode */
4332 xfs_fileoff_t bno, /* starting file offs. mapped */
4333 xfs_filblks_t len, /* length to map in file */
4334 uint32_t flags, /* XFS_BMAPI_... */
4335 xfs_extlen_t total, /* total blocks needed */
4336 struct xfs_bmbt_irec *mval, /* output: map values */
4337 int *nmap) /* i/o: mval size/count */
4339 struct xfs_bmalloca bma = {
4344 struct xfs_mount *mp = ip->i_mount;
4345 int whichfork = xfs_bmapi_whichfork(flags);
4346 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4347 xfs_fileoff_t end; /* end of mapped file region */
4348 bool eof = false; /* after the end of extents */
4349 int error; /* error return */
4350 int n; /* current extent index */
4351 xfs_fileoff_t obno; /* old block number (offset) */
4354 xfs_fileoff_t orig_bno; /* original block number value */
4355 int orig_flags; /* original flags arg value */
4356 xfs_filblks_t orig_len; /* original value of len arg */
4357 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4358 int orig_nmap; /* original value of *nmap */
4368 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4371 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4372 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4373 ASSERT(!(flags & XFS_BMAPI_REMAP));
4375 /* zeroing is currently only supported for data extents, not metadata */
4376 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4377 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4379 * we can allocate unwritten extents or pre-zero allocated blocks,
4380 * but it makes no sense to do both at once. This would result in
4381 * zeroing the unwritten extent twice, but it still being an
4382 * unwritten extent....
4384 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4385 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4387 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4388 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4389 return -EFSCORRUPTED;
4392 if (xfs_is_shutdown(mp))
4395 XFS_STATS_INC(mp, xs_blk_mapw);
4397 error = xfs_iread_extents(tp, ip, whichfork);
4401 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4403 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4404 bma.prev.br_startoff = NULLFILEOFF;
4405 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4410 while (bno < end && n < *nmap) {
4411 bool need_alloc = false, wasdelay = false;
4413 /* in hole or beyond EOF? */
4414 if (eof || bma.got.br_startoff > bno) {
4416 * CoW fork conversions should /never/ hit EOF or
4417 * holes. There should always be something for us
4418 * to work on.
4420 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4421 (flags & XFS_BMAPI_COWFORK)));
4424 } else if (isnullstartblock(bma.got.br_startblock)) {
4429 * First, deal with the hole before the allocated space
4430 * that we found, if any.
4432 if (need_alloc || wasdelay) {
4434 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4435 bma.wasdel = wasdelay;
4440 * There's a 32/64 bit type mismatch between the
4441 * allocation length request (which can be 64 bits in
4442 * length) and the bma length request, which is
4443 * xfs_extlen_t and therefore 32 bits. Hence we have to
4444 * check for 32-bit overflows and handle them here.
4446 if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
4447 bma.length = XFS_MAX_BMBT_EXTLEN;
4452 ASSERT(bma.length > 0);
4453 error = xfs_bmapi_allocate(&bma);
4456 if (bma.blkno == NULLFSBLOCK)
4460 * If this is a CoW allocation, record the data in
4461 * the refcount btree for orphan recovery.
4463 if (whichfork == XFS_COW_FORK)
4464 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4468 /* Deal with the allocated space we found. */
4469 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4472 /* Execute unwritten extent conversion if necessary */
4473 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4474 if (error == -EAGAIN)
4479 /* update the extent map to return */
4480 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4483 * If we're done, stop now. Stop when we've allocated
4484 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4485 * the transaction may get too big.
4487 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4490 /* Else go on to the next record. */
4492 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4497 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4502 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4503 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4504 xfs_bmapi_finish(&bma, whichfork, 0);
4505 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4509 xfs_bmapi_finish(&bma, whichfork, error);
4514 * Convert an existing delalloc extent to real blocks based on file offset. This
4515 * attempts to allocate the entire delalloc extent and may require multiple
4516 * invocations to allocate the target offset if a large enough physical extent
4517 * is not available.
4520 xfs_bmapi_convert_delalloc(
4521 struct xfs_inode *ip,
4524 struct iomap *iomap,
4527 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4528 struct xfs_mount *mp = ip->i_mount;
4529 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4530 struct xfs_bmalloca bma = { NULL };
4532 struct xfs_trans *tp;
4535 if (whichfork == XFS_COW_FORK)
4536 flags |= IOMAP_F_SHARED;
4539 * Space for the extent and indirect blocks was reserved when the
4540 * delalloc extent was created so there's no need to do so here.
4542 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4543 XFS_TRANS_RESERVE, &tp);
4547 xfs_ilock(ip, XFS_ILOCK_EXCL);
4548 xfs_trans_ijoin(tp, ip, 0);
4550 error = xfs_iext_count_may_overflow(ip, whichfork,
4551 XFS_IEXT_ADD_NOSPLIT_CNT);
4552 if (error == -EFBIG)
4553 error = xfs_iext_count_upgrade(tp, ip,
4554 XFS_IEXT_ADD_NOSPLIT_CNT);
4556 goto out_trans_cancel;
4558 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4559 bma.got.br_startoff > offset_fsb) {
4561 * No extent found in the range we are trying to convert. This
4562 * should only happen for the COW fork, where another thread
4563 * might have moved the extent to the data fork in the meantime.
4565 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4567 goto out_trans_cancel;
4571 * If we find a real extent here we raced with another thread converting
4572 * the extent. Just return the real extent at this offset.
4574 if (!isnullstartblock(bma.got.br_startblock)) {
4575 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4576 xfs_iomap_inode_sequence(ip, flags));
4577 *seq = READ_ONCE(ifp->if_seq);
4578 goto out_trans_cancel;
4584 bma.offset = bma.got.br_startoff;
4585 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
4586 XFS_MAX_BMBT_EXTLEN);
4587 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4590 * When we're converting the delalloc reservations backing dirty pages
4591 * in the page cache, we must be careful about how we create the new extents:
4594 * New CoW fork extents are created unwritten, turned into real extents
4595 * when we're about to write the data to disk, and mapped into the data
4596 * fork after the write finishes. End of story.
4598 * New data fork extents must be mapped in as unwritten and converted
4599 * to real extents after the write succeeds to avoid exposing stale
4600 * disk contents if we crash.
4602 bma.flags = XFS_BMAPI_PREALLOC;
4603 if (whichfork == XFS_COW_FORK)
4604 bma.flags |= XFS_BMAPI_COWFORK;
4606 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4607 bma.prev.br_startoff = NULLFILEOFF;
4609 error = xfs_bmapi_allocate(&bma);
4614 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4616 error = -EFSCORRUPTED;
4617 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4620 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4621 XFS_STATS_INC(mp, xs_xstrat_quick);
4623 ASSERT(!isnullstartblock(bma.got.br_startblock));
4624 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4625 xfs_iomap_inode_sequence(ip, flags));
4626 *seq = READ_ONCE(ifp->if_seq);
4628 if (whichfork == XFS_COW_FORK)
4629 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4631 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4636 xfs_bmapi_finish(&bma, whichfork, 0);
4637 error = xfs_trans_commit(tp);
4638 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4642 xfs_bmapi_finish(&bma, whichfork, error);
4644 xfs_trans_cancel(tp);
4645 xfs_iunlock(ip, XFS_ILOCK_EXCL);
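/*
 * Illustrative aside: as the comment above xfs_bmapi_convert_delalloc()
 * notes, one call converts (at most) one allocation's worth of the delalloc
 * extent, so a caller that needs a particular offset mapped may have to
 * retry until the returned mapping covers it.  A minimal userspace sketch of
 * that retry pattern; convert_one() and struct mapping are hypothetical
 * stand-ins, not kernel APIs:
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

struct mapping {
	uint64_t start;		/* first byte covered */
	uint64_t length;	/* bytes covered */
};

/* stand-in for one conversion attempt; returns false on error */
bool convert_one(uint64_t offset, struct mapping *map);

static bool convert_until_covered(uint64_t offset, struct mapping *map)
{
	/* each successful call is expected to extend coverage toward offset */
	do {
		if (!convert_one(offset, map))
			return false;
	} while (offset < map->start || offset >= map->start + map->length);
	return true;
}
#endif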
4651 struct xfs_trans *tp,
4652 struct xfs_inode *ip,
4655 xfs_fsblock_t startblock,
4658 struct xfs_mount *mp = ip->i_mount;
4659 struct xfs_ifork *ifp;
4660 struct xfs_btree_cur *cur = NULL;
4661 struct xfs_bmbt_irec got;
4662 struct xfs_iext_cursor icur;
4663 int whichfork = xfs_bmapi_whichfork(flags);
4664 int logflags = 0, error;
4666 ifp = xfs_ifork_ptr(ip, whichfork);
4668 ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4669 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4670 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4671 XFS_BMAPI_NORMAP)));
4672 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4673 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4675 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4676 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4677 return -EFSCORRUPTED;
4680 if (xfs_is_shutdown(mp))
4683 error = xfs_iread_extents(tp, ip, whichfork);
4687 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4688 /* make sure we only reflink into a hole. */
4689 ASSERT(got.br_startoff > bno);
4690 ASSERT(got.br_startoff - bno >= len);
4693 ip->i_nblocks += len;
4694 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4696 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
4697 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4698 cur->bc_ino.flags = 0;
4701 got.br_startoff = bno;
4702 got.br_startblock = startblock;
4703 got.br_blockcount = len;
4704 if (flags & XFS_BMAPI_PREALLOC)
4705 got.br_state = XFS_EXT_UNWRITTEN;
4707 got.br_state = XFS_EXT_NORM;
4709 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4710 &cur, &got, &logflags, flags);
4714 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4717 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4718 logflags &= ~XFS_ILOG_DEXT;
4719 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4720 logflags &= ~XFS_ILOG_DBROOT;
4723 xfs_trans_log_inode(tp, ip, logflags);
4725 xfs_btree_del_cursor(cur, error);
4730 * When a delalloc extent is split (e.g., due to a hole punch), the original
4731 * indlen reservation must be shared across the two new extents that are left behind.
4734 * Given the original reservation and the worst case indlen for the two new
4735 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4736 * reservation fairly across the two new extents. If necessary, steal available
4737 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4738 * ores == 1). The number of stolen blocks is returned. The availability and
4739 * subsequent accounting of stolen blocks is the responsibility of the caller.
4741 static xfs_filblks_t
4742 xfs_bmap_split_indlen(
4743 xfs_filblks_t ores, /* original res. */
4744 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4745 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4746 xfs_filblks_t avail) /* stealable blocks */
4748 xfs_filblks_t len1 = *indlen1;
4749 xfs_filblks_t len2 = *indlen2;
4750 xfs_filblks_t nres = len1 + len2; /* new total res. */
4751 xfs_filblks_t stolen = 0;
4752 xfs_filblks_t resfactor;
4755 * Steal as many blocks as we can to try and satisfy the worst case
4756 * indlen for both new extents.
4758 if (ores < nres && avail)
4759 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4762 /* nothing else to do if we've satisfied the new reservation */
4767 * We can't meet the total required reservation for the two extents.
4768 * Calculate the percent of the overall shortage between both extents
4769 * and apply this percentage to each of the requested indlen values.
4770 * This distributes the shortage fairly and reduces the chances that one
4771 * of the two extents is left with nothing when extents are repeatedly split.
4774 resfactor = (ores * 100);
4775 do_div(resfactor, nres);
4780 ASSERT(len1 + len2 <= ores);
4781 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4784 * Hand out the remainder to each extent. If one of the two reservations
4785 * is zero, we want to make sure that one gets a block first. The loop
4786 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4789 ores -= (len1 + len2);
4790 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4791 if (ores && !len2 && *indlen2) {
4796 if (len1 < *indlen1) {
4802 if (len2 < *indlen2) {
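/*
 * Illustrative aside: the "percent of the overall shortage" distribution
 * above boils down to scaling each worst-case reservation by ores/nres and
 * then handing out the leftover blocks one at a time.  A minimal userspace
 * sketch of that arithmetic; split_indlen() is illustrative, not the kernel
 * helper, and it ignores the block-stealing step:
 */
#if 0	/* example only */
#include <stdint.h>

static void split_indlen(uint64_t ores, uint64_t *indlen1, uint64_t *indlen2)
{
	uint64_t len1 = *indlen1, len2 = *indlen2;
	uint64_t nres = len1 + len2;

	if (ores >= nres)
		return;			/* nothing to trim */

	/* scale both requests by the same ores/nres factor (in percent) */
	uint64_t resfactor = ores * 100 / nres;

	len1 = len1 * resfactor / 100;
	len2 = len2 * resfactor / 100;

	/* hand the leftover blocks out one at a time, len2 first if empty */
	ores -= len1 + len2;
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (ores && len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
}
#endif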
4815 xfs_bmap_del_extent_delay(
4816 struct xfs_inode *ip,
4818 struct xfs_iext_cursor *icur,
4819 struct xfs_bmbt_irec *got,
4820 struct xfs_bmbt_irec *del)
4822 struct xfs_mount *mp = ip->i_mount;
4823 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
4824 struct xfs_bmbt_irec new;
4825 int64_t da_old, da_new, da_diff = 0;
4826 xfs_fileoff_t del_endoff, got_endoff;
4827 xfs_filblks_t got_indlen, new_indlen, stolen;
4828 uint32_t state = xfs_bmap_fork_to_state(whichfork);
4832 XFS_STATS_INC(mp, xs_del_exlist);
4834 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4835 del_endoff = del->br_startoff + del->br_blockcount;
4836 got_endoff = got->br_startoff + got->br_blockcount;
4837 da_old = startblockval(got->br_startblock);
4840 ASSERT(del->br_blockcount > 0);
4841 ASSERT(got->br_startoff <= del->br_startoff);
4842 ASSERT(got_endoff >= del_endoff);
4845 xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
4848 * Update the inode delalloc counter now and wait to update the
4849 * sb counters as we might have to borrow some blocks for the
4850 * indirect block accounting.
4853 error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4856 ip->i_delayed_blks -= del->br_blockcount;
4858 if (got->br_startoff == del->br_startoff)
4859 state |= BMAP_LEFT_FILLING;
4860 if (got_endoff == del_endoff)
4861 state |= BMAP_RIGHT_FILLING;
4863 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4864 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4866 * Matches the whole extent. Delete the entry.
4868 xfs_iext_remove(ip, icur, state);
4869 xfs_iext_prev(ifp, icur);
4871 case BMAP_LEFT_FILLING:
4873 * Deleting the first part of the extent.
4875 got->br_startoff = del_endoff;
4876 got->br_blockcount -= del->br_blockcount;
4877 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4878 got->br_blockcount), da_old);
4879 got->br_startblock = nullstartblock((int)da_new);
4880 xfs_iext_update_extent(ip, state, icur, got);
4882 case BMAP_RIGHT_FILLING:
4884 * Deleting the last part of the extent.
4886 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4887 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4888 got->br_blockcount), da_old);
4889 got->br_startblock = nullstartblock((int)da_new);
4890 xfs_iext_update_extent(ip, state, icur, got);
4894 * Deleting the middle of the extent.
4896 * Distribute the original indlen reservation across the two new
4897 * extents. Steal blocks from the deleted extent if necessary.
4898 * Stealing blocks simply fudges the fdblocks accounting below.
4899 * Warn if either of the new indlen reservations is zero as this
4900 * can lead to delalloc problems.
4902 got->br_blockcount = del->br_startoff - got->br_startoff;
4903 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4905 new.br_blockcount = got_endoff - del_endoff;
4906 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4908 WARN_ON_ONCE(!got_indlen || !new_indlen);
4909 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4910 del->br_blockcount);
4912 got->br_startblock = nullstartblock((int)got_indlen);
4914 new.br_startoff = del_endoff;
4915 new.br_state = got->br_state;
4916 new.br_startblock = nullstartblock((int)new_indlen);
4918 xfs_iext_update_extent(ip, state, icur, got);
4919 xfs_iext_next(ifp, icur);
4920 xfs_iext_insert(ip, icur, &new, state);
4922 da_new = got_indlen + new_indlen - stolen;
4923 del->br_blockcount -= stolen;
4927 ASSERT(da_old >= da_new);
4928 da_diff = da_old - da_new;
4930 da_diff += del->br_blockcount;
4932 xfs_mod_fdblocks(mp, da_diff, false);
4933 xfs_mod_delalloc(mp, -da_diff);
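/*
 * Illustrative aside: the del_extent variants all classify the deletion
 * against the hosting extent the same way - does the deleted range share the
 * extent's start, its end, both, or neither?  A minimal userspace sketch of
 * that classification; the types and flag names are illustrative stand-ins
 * for the BMAP_LEFT_FILLING/BMAP_RIGHT_FILLING state bits:
 */
#if 0	/* example only */
#include <stdint.h>

#define LEFT_FILLING	(1u << 0)	/* deletion starts at extent start */
#define RIGHT_FILLING	(1u << 1)	/* deletion ends at extent end */

struct irec {
	uint64_t startoff;
	uint64_t blockcount;
};

static unsigned int classify_delete(const struct irec *got,
				    const struct irec *del)
{
	unsigned int state = 0;

	if (got->startoff == del->startoff)
		state |= LEFT_FILLING;
	if (got->startoff + got->blockcount ==
	    del->startoff + del->blockcount)
		state |= RIGHT_FILLING;

	/*
	 * Both bits set: the whole extent is deleted; one bit: one end is
	 * trimmed; no bits: the middle is punched out and the extent is
	 * split in two.
	 */
	return state;
}
#endif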
4939 xfs_bmap_del_extent_cow(
4940 struct xfs_inode *ip,
4941 struct xfs_iext_cursor *icur,
4942 struct xfs_bmbt_irec *got,
4943 struct xfs_bmbt_irec *del)
4945 struct xfs_mount *mp = ip->i_mount;
4946 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
4947 struct xfs_bmbt_irec new;
4948 xfs_fileoff_t del_endoff, got_endoff;
4949 uint32_t state = BMAP_COWFORK;
4951 XFS_STATS_INC(mp, xs_del_exlist);
4953 del_endoff = del->br_startoff + del->br_blockcount;
4954 got_endoff = got->br_startoff + got->br_blockcount;
4956 ASSERT(del->br_blockcount > 0);
4957 ASSERT(got->br_startoff <= del->br_startoff);
4958 ASSERT(got_endoff >= del_endoff);
4959 ASSERT(!isnullstartblock(got->br_startblock));
4961 if (got->br_startoff == del->br_startoff)
4962 state |= BMAP_LEFT_FILLING;
4963 if (got_endoff == del_endoff)
4964 state |= BMAP_RIGHT_FILLING;
4966 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4967 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4969 * Matches the whole extent. Delete the entry.
4971 xfs_iext_remove(ip, icur, state);
4972 xfs_iext_prev(ifp, icur);
4974 case BMAP_LEFT_FILLING:
4976 * Deleting the first part of the extent.
4978 got->br_startoff = del_endoff;
4979 got->br_blockcount -= del->br_blockcount;
4980 got->br_startblock = del->br_startblock + del->br_blockcount;
4981 xfs_iext_update_extent(ip, state, icur, got);
4983 case BMAP_RIGHT_FILLING:
4985 * Deleting the last part of the extent.
4987 got->br_blockcount -= del->br_blockcount;
4988 xfs_iext_update_extent(ip, state, icur, got);
4992 * Deleting the middle of the extent.
4994 got->br_blockcount = del->br_startoff - got->br_startoff;
4996 new.br_startoff = del_endoff;
4997 new.br_blockcount = got_endoff - del_endoff;
4998 new.br_state = got->br_state;
4999 new.br_startblock = del->br_startblock + del->br_blockcount;
5001 xfs_iext_update_extent(ip, state, icur, got);
5002 xfs_iext_next(ifp, icur);
5003 xfs_iext_insert(ip, icur, &new, state);
5006 ip->i_delayed_blks -= del->br_blockcount;
5010 * Called by xfs_bmapi to update file extent records and the btree
5011 * after removing space.
5013 STATIC int /* error */
5014 xfs_bmap_del_extent_real(
5015 xfs_inode_t *ip, /* incore inode pointer */
5016 xfs_trans_t *tp, /* current transaction pointer */
5017 struct xfs_iext_cursor *icur,
5018 struct xfs_btree_cur *cur, /* if null, not a btree */
5019 xfs_bmbt_irec_t *del, /* data to remove from extents */
5020 int *logflagsp, /* inode logging flags */
5021 int whichfork, /* data or attr fork */
5022 uint32_t bflags) /* bmapi flags */
5024 xfs_fsblock_t del_endblock=0; /* first block past del */
5025 xfs_fileoff_t del_endoff; /* first offset past del */
5026 int do_fx; /* free extent at end of routine */
5027 int error; /* error return value */
5028 struct xfs_bmbt_irec got; /* current extent entry */
5029 xfs_fileoff_t got_endoff; /* first offset past got */
5030 int i; /* temp state */
5031 struct xfs_ifork *ifp; /* inode fork pointer */
5032 xfs_mount_t *mp; /* mount structure */
5033 xfs_filblks_t nblks; /* quota/sb block count */
5034 xfs_bmbt_irec_t new; /* new record to be inserted */
5036 uint qfield; /* quota field to update */
5037 uint32_t state = xfs_bmap_fork_to_state(whichfork);
5038 struct xfs_bmbt_irec old;
5043 XFS_STATS_INC(mp, xs_del_exlist);
5045 ifp = xfs_ifork_ptr(ip, whichfork);
5046 ASSERT(del->br_blockcount > 0);
5047 xfs_iext_get_extent(ifp, icur, &got);
5048 ASSERT(got.br_startoff <= del->br_startoff);
5049 del_endoff = del->br_startoff + del->br_blockcount;
5050 got_endoff = got.br_startoff + got.br_blockcount;
5051 ASSERT(got_endoff >= del_endoff);
5052 ASSERT(!isnullstartblock(got.br_startblock));
5056 * If it's the case where the directory code is running with no block
5057 * reservation, and the deleted block is in the middle of its extent,
5058 * and the resulting insert of an extent would cause transformation to
5059 * btree format, then reject it. The calling code will then swap blocks
5060 * around instead. We have to do this now, rather than waiting for the
5061 * conversion to btree format, since the transaction will be dirty then.
5063 if (tp->t_blk_res == 0 &&
5064 ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5065 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5066 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5069 *logflagsp = XFS_ILOG_CORE;
5070 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5071 if (!(bflags & XFS_BMAPI_REMAP)) {
5072 error = xfs_rtfree_blocks(tp, del->br_startblock,
5073 del->br_blockcount);
5079 qfield = XFS_TRANS_DQ_RTBCOUNT;
5082 qfield = XFS_TRANS_DQ_BCOUNT;
5084 nblks = del->br_blockcount;
5086 del_endblock = del->br_startblock + del->br_blockcount;
5088 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5091 if (XFS_IS_CORRUPT(mp, i != 1))
5092 return -EFSCORRUPTED;
5095 if (got.br_startoff == del->br_startoff)
5096 state |= BMAP_LEFT_FILLING;
5097 if (got_endoff == del_endoff)
5098 state |= BMAP_RIGHT_FILLING;
5100 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5101 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5103 * Matches the whole extent. Delete the entry.
5105 xfs_iext_remove(ip, icur, state);
5106 xfs_iext_prev(ifp, icur);
5109 *logflagsp |= XFS_ILOG_CORE;
5111 *logflagsp |= xfs_ilog_fext(whichfork);
5114 if ((error = xfs_btree_delete(cur, &i)))
5116 if (XFS_IS_CORRUPT(mp, i != 1))
5117 return -EFSCORRUPTED;
5119 case BMAP_LEFT_FILLING:
5121 * Deleting the first part of the extent.
5123 got.br_startoff = del_endoff;
5124 got.br_startblock = del_endblock;
5125 got.br_blockcount -= del->br_blockcount;
5126 xfs_iext_update_extent(ip, state, icur, &got);
5128 *logflagsp |= xfs_ilog_fext(whichfork);
5131 error = xfs_bmbt_update(cur, &got);
5135 case BMAP_RIGHT_FILLING:
5137 * Deleting the last part of the extent.
5139 got.br_blockcount -= del->br_blockcount;
5140 xfs_iext_update_extent(ip, state, icur, &got);
5142 *logflagsp |= xfs_ilog_fext(whichfork);
5145 error = xfs_bmbt_update(cur, &got);
5151 * Deleting the middle of the extent.
5156 got.br_blockcount = del->br_startoff - got.br_startoff;
5157 xfs_iext_update_extent(ip, state, icur, &got);
5159 new.br_startoff = del_endoff;
5160 new.br_blockcount = got_endoff - del_endoff;
5161 new.br_state = got.br_state;
5162 new.br_startblock = del_endblock;
5164 *logflagsp |= XFS_ILOG_CORE;
5166 error = xfs_bmbt_update(cur, &got);
5169 error = xfs_btree_increment(cur, 0, &i);
5172 cur->bc_rec.b = new;
5173 error = xfs_btree_insert(cur, &i);
5174 if (error && error != -ENOSPC)
5177 * If we get no-space back from the btree insert, it tried a
5178 * split and we have a zero block reservation. Fix up
5179 * our state and return the error.
5181 if (error == -ENOSPC) {
5183 * Reset the cursor, don't trust it after any insert operation.
5186 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5189 if (XFS_IS_CORRUPT(mp, i != 1))
5190 return -EFSCORRUPTED;
5192 * Update the btree record back
5193 * to the original value.
5195 error = xfs_bmbt_update(cur, &old);
5199 * Reset the extent record back
5200 * to the original value.
5202 xfs_iext_update_extent(ip, state, icur, &old);
5206 if (XFS_IS_CORRUPT(mp, i != 1))
5207 return -EFSCORRUPTED;
5209 *logflagsp |= xfs_ilog_fext(whichfork);
5212 xfs_iext_next(ifp, icur);
5213 xfs_iext_insert(ip, icur, &new, state);
5217 /* remove reverse mapping */
5218 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5221 * If we need to, add to list of extents to delete.
5223 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5224 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5225 xfs_refcount_decrease_extent(tp, del);
5227 error = xfs_free_extent_later(tp, del->br_startblock,
5228 del->br_blockcount, NULL,
5230 ((bflags & XFS_BMAPI_NODISCARD) ||
5231 del->br_state == XFS_EXT_UNWRITTEN));
5238 * Adjust inode # blocks in the file.
5241 ip->i_nblocks -= nblks;
5243 * Adjust quota data.
5245 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5246 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
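/*
 * Illustrative aside: the ENOSPC guard near the top of
 * xfs_bmap_del_extent_real() rejects exactly one situation - a middle-of-
 * extent punch that would force an extents-to-btree conversion while the
 * transaction has no block reservation.  A minimal userspace sketch of that
 * predicate; the names are illustrative:
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

static bool must_reject_unreserved_delete(uint64_t blk_res,
					  bool extents_format,
					  uint64_t nextents,
					  uint64_t max_inline_extents,
					  uint64_t got_start, uint64_t got_end,
					  uint64_t del_start, uint64_t del_end)
{
	/* a middle punch adds one extent record... */
	bool middle = del_start > got_start && del_end < got_end;

	/* ...which would overflow the inline extent list into a btree */
	bool would_convert = extents_format &&
			     nextents >= max_inline_extents;

	return blk_res == 0 && would_convert && middle;
}
#endif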
5252 * Unmap (remove) blocks from a file.
5253 * If nexts is nonzero then the number of extents to remove is limited to
5254 * that value. If not all extents in the block range can be removed then the length still to be unmapped is returned in *rlen.
5259 struct xfs_trans *tp, /* transaction pointer */
5260 struct xfs_inode *ip, /* incore inode */
5261 xfs_fileoff_t start, /* first file offset deleted */
5262 xfs_filblks_t *rlen, /* i/o: amount remaining */
5263 uint32_t flags, /* misc flags */
5264 xfs_extnum_t nexts) /* number of extents max */
5266 struct xfs_btree_cur *cur; /* bmap btree cursor */
5267 struct xfs_bmbt_irec del; /* extent being deleted */
5268 int error; /* error return value */
5269 xfs_extnum_t extno; /* extent number in list */
5270 struct xfs_bmbt_irec got; /* current extent record */
5271 struct xfs_ifork *ifp; /* inode fork pointer */
5272 int isrt; /* freeing in rt area */
5273 int logflags; /* transaction logging flags */
5274 xfs_extlen_t mod; /* rt extent offset */
5275 struct xfs_mount *mp = ip->i_mount;
5276 int tmp_logflags; /* partial logging flags */
5277 int wasdel; /* was a delayed alloc extent */
5278 int whichfork; /* data or attribute fork */
5279 xfs_filblks_t len = *rlen; /* length to unmap in file */
5281 struct xfs_iext_cursor icur;
5284 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5286 whichfork = xfs_bmapi_whichfork(flags);
5287 ASSERT(whichfork != XFS_COW_FORK);
5288 ifp = xfs_ifork_ptr(ip, whichfork);
5289 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
5290 return -EFSCORRUPTED;
5291 if (xfs_is_shutdown(mp))
5294 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5298 error = xfs_iread_extents(tp, ip, whichfork);
5302 if (xfs_iext_count(ifp) == 0) {
5306 XFS_STATS_INC(mp, xs_blk_unmap);
5307 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5310 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5317 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5318 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5319 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5320 cur->bc_ino.flags = 0;
5326 * Synchronize by locking the bitmap inode.
5328 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5329 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5330 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5331 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5335 while (end != (xfs_fileoff_t)-1 && end >= start &&
5336 (nexts == 0 || extno < nexts)) {
5338 * Is the found extent after a hole in which end lives?
5339 * Just back up to the previous extent, if so.
5341 if (got.br_startoff > end &&
5342 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5347 * Is the last block of this extent before the range
5348 * we're supposed to delete? If so, we're done.
5350 end = XFS_FILEOFF_MIN(end,
5351 got.br_startoff + got.br_blockcount - 1);
5355 * Then deal with the (possibly delayed) allocated space we found.
5359 wasdel = isnullstartblock(del.br_startblock);
5361 if (got.br_startoff < start) {
5362 del.br_startoff = start;
5363 del.br_blockcount -= start - got.br_startoff;
5365 del.br_startblock += start - got.br_startoff;
5367 if (del.br_startoff + del.br_blockcount > end + 1)
5368 del.br_blockcount = end + 1 - del.br_startoff;
5373 mod = xfs_rtb_to_rtxoff(mp,
5374 del.br_startblock + del.br_blockcount);
5377 * Realtime extent not lined up at the end.
5378 * The extent could have been split into written
5379 * and unwritten pieces, or we could just be
5380 * unmapping part of it. But we can't really
5381 * get rid of part of a realtime extent.
5383 if (del.br_state == XFS_EXT_UNWRITTEN) {
5385 * This piece is unwritten, or we're not
5386 * using unwritten extents. Skip over it.
5389 end -= mod > del.br_blockcount ?
5390 del.br_blockcount : mod;
5391 if (end < got.br_startoff &&
5392 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5399 * It's written, turn it unwritten.
5400 * This is better than zeroing it.
5402 ASSERT(del.br_state == XFS_EXT_NORM);
5403 ASSERT(tp->t_blk_res > 0);
5405 * If this spans a realtime extent boundary,
5406 * chop it back to the start of the one we end at.
5408 if (del.br_blockcount > mod) {
5409 del.br_startoff += del.br_blockcount - mod;
5410 del.br_startblock += del.br_blockcount - mod;
5411 del.br_blockcount = mod;
5413 del.br_state = XFS_EXT_UNWRITTEN;
5414 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5415 whichfork, &icur, &cur, &del,
5422 mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5424 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5427 * Realtime extent is lined up at the end but not
5428 * at the front. We'll get rid of full extents if we can.
5431 if (del.br_blockcount > off) {
5432 del.br_blockcount -= off;
5433 del.br_startoff += off;
5434 del.br_startblock += off;
5435 } else if (del.br_startoff == start &&
5436 (del.br_state == XFS_EXT_UNWRITTEN ||
5437 tp->t_blk_res == 0)) {
5439 * Can't make it unwritten. There isn't
5440 * a full extent here so just skip it.
5442 ASSERT(end >= del.br_blockcount);
5443 end -= del.br_blockcount;
5444 if (got.br_startoff > end &&
5445 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5450 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5451 struct xfs_bmbt_irec prev;
5452 xfs_fileoff_t unwrite_start;
5455 * This one is already unwritten.
5456 * It must have a written left neighbor.
5457 * Unwrite the killed part of that one and try again.
5460 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5462 ASSERT(prev.br_state == XFS_EXT_NORM);
5463 ASSERT(!isnullstartblock(prev.br_startblock));
5464 ASSERT(del.br_startblock ==
5465 prev.br_startblock + prev.br_blockcount);
5466 unwrite_start = max3(start,
5467 del.br_startoff - mod,
5469 mod = unwrite_start - prev.br_startoff;
5470 prev.br_startoff = unwrite_start;
5471 prev.br_startblock += mod;
5472 prev.br_blockcount -= mod;
5473 prev.br_state = XFS_EXT_UNWRITTEN;
5474 error = xfs_bmap_add_extent_unwritten_real(tp,
5475 ip, whichfork, &icur, &cur,
5481 ASSERT(del.br_state == XFS_EXT_NORM);
5482 del.br_state = XFS_EXT_UNWRITTEN;
5483 error = xfs_bmap_add_extent_unwritten_real(tp,
5484 ip, whichfork, &icur, &cur,
5494 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5497 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5498 &del, &tmp_logflags, whichfork,
5500 logflags |= tmp_logflags;
5506 end = del.br_startoff - 1;
5509 * If not done, go on to the next (previous) record.
5511 if (end != (xfs_fileoff_t)-1 && end >= start) {
5512 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5513 (got.br_startoff > end &&
5514 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5521 if (done || end == (xfs_fileoff_t)-1 || end < start)
5524 *rlen = end - start + 1;
5527 * Convert to a btree if necessary.
5529 if (xfs_bmap_needs_btree(ip, whichfork)) {
5530 ASSERT(cur == NULL);
5531 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5532 &tmp_logflags, whichfork);
5533 logflags |= tmp_logflags;
5535 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5541 * Log everything. Do this after conversion; there's no point in
5542 * logging the extent records if we've converted to btree format.
5544 if ((logflags & xfs_ilog_fext(whichfork)) &&
5545 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5546 logflags &= ~xfs_ilog_fext(whichfork);
5547 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5548 ifp->if_format != XFS_DINODE_FMT_BTREE)
5549 logflags &= ~xfs_ilog_fbroot(whichfork);
5551 * Log the inode even in the error case: if the transaction
5552 * is dirty, we'll need to shut down the filesystem.
5555 xfs_trans_log_inode(tp, ip, logflags);
5558 cur->bc_ino.allocated = 0;
5559 xfs_btree_del_cursor(cur, error);
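/*
 * Illustrative aside: the realtime cases in __xfs_bunmapi() keep the unmap
 * aligned to whole realtime extents.  If the range to delete starts partway
 * into an rt extent, the front is trimmed off so the free begins on an rt
 * extent boundary.  A minimal userspace sketch of that front-trim; rextsize
 * and the field names are illustrative stand-ins:
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

struct range {
	uint64_t startblock;	/* first fs block */
	uint64_t startoff;	/* first file offset, in blocks */
	uint64_t blockcount;	/* length in fs blocks */
};

/* returns false if the range never reaches the next rt extent boundary */
static bool trim_to_rtext_boundary(struct range *del, uint32_t rextsize)
{
	uint32_t mod = del->startblock % rextsize;	/* offset into rt extent */
	uint32_t off;

	if (!mod)
		return true;		/* already aligned */

	off = rextsize - mod;		/* blocks up to the next boundary */
	if (del->blockcount <= off)
		return false;		/* no whole rt extent inside the range */

	del->startblock += off;
	del->startoff += off;
	del->blockcount -= off;
	return true;
}
#endif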
5564 /* Unmap a range of a file. */
5568 struct xfs_inode *ip,
5577 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5583 * Determine whether an extent shift can be accomplished by a merge with the
5584 * extent that precedes the target hole of the shift.
5588 struct xfs_bmbt_irec *left, /* preceding extent */
5589 struct xfs_bmbt_irec *got, /* current extent to shift */
5590 xfs_fileoff_t shift) /* shift fsb */
5592 xfs_fileoff_t startoff;
5594 startoff = got->br_startoff - shift;
5597 * The extent, once shifted, must be adjacent in-file and on-disk with
5598 * the preceding extent.
5600 if ((left->br_startoff + left->br_blockcount != startoff) ||
5601 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5602 (left->br_state != got->br_state) ||
5603 (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
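/*
 * Illustrative aside: a worked example of the merge test above.  Take an
 * illustrative left extent of 10 blocks at file offset 100 (disk block 500)
 * and a target extent of 5 blocks at file offset 115 (disk block 510).  A
 * shift of 5 lands the target exactly after the left extent both in the file
 * and on disk, so the two can be merged into one 15-block extent:
 */
#if 0	/* example only */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t left_off = 100, left_block = 500, left_len = 10;
	uint64_t got_off = 115, got_block = 510, got_len = 5;
	uint64_t shift = 5;

	uint64_t new_off = got_off - shift;

	/* contiguous in the file and on disk after the shift */
	assert(left_off + left_len == new_off);
	assert(left_block + left_len == got_block);

	/* the merged extent replaces both */
	uint64_t merged_len = left_len + got_len;
	assert(merged_len == 15);
	return 0;
}
#endif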
5610 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5611 * hole in the file. If an extent shift would result in the extent being fully
5612 * adjacent to the extent that currently precedes the hole, we can merge with
5613 * the preceding extent rather than do the shift.
5615 * This function assumes the caller has verified a shift-by-merge is possible
5616 * with the provided extents via xfs_bmse_can_merge().
5620 struct xfs_trans *tp,
5621 struct xfs_inode *ip,
5623 xfs_fileoff_t shift, /* shift fsb */
5624 struct xfs_iext_cursor *icur,
5625 struct xfs_bmbt_irec *got, /* extent to shift */
5626 struct xfs_bmbt_irec *left, /* preceding extent */
5627 struct xfs_btree_cur *cur,
5628 int *logflags) /* output */
5630 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5631 struct xfs_bmbt_irec new;
5632 xfs_filblks_t blockcount;
5634 struct xfs_mount *mp = ip->i_mount;
5636 blockcount = left->br_blockcount + got->br_blockcount;
5638 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5639 ASSERT(xfs_bmse_can_merge(left, got, shift));
5642 new.br_blockcount = blockcount;
5645 * Update the on-disk extent count, the btree if necessary and log the inode.
5649 *logflags |= XFS_ILOG_CORE;
5651 *logflags |= XFS_ILOG_DEXT;
5655 /* lookup and remove the extent to merge */
5656 error = xfs_bmbt_lookup_eq(cur, got, &i);
5659 if (XFS_IS_CORRUPT(mp, i != 1))
5660 return -EFSCORRUPTED;
5662 error = xfs_btree_delete(cur, &i);
5665 if (XFS_IS_CORRUPT(mp, i != 1))
5666 return -EFSCORRUPTED;
5668 /* lookup and update size of the previous extent */
5669 error = xfs_bmbt_lookup_eq(cur, left, &i);
5672 if (XFS_IS_CORRUPT(mp, i != 1))
5673 return -EFSCORRUPTED;
5675 error = xfs_bmbt_update(cur, &new);
5679 /* change to extent format if required after extent removal */
5680 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5685 xfs_iext_remove(ip, icur, 0);
5686 xfs_iext_prev(ifp, icur);
5687 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5690 /* update reverse mapping. rmap functions merge the rmaps for us */
5691 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5692 memcpy(&new, got, sizeof(new));
5693 new.br_startoff = left->br_startoff + left->br_blockcount;
5694 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5699 xfs_bmap_shift_update_extent(
5700 struct xfs_trans *tp,
5701 struct xfs_inode *ip,
5703 struct xfs_iext_cursor *icur,
5704 struct xfs_bmbt_irec *got,
5705 struct xfs_btree_cur *cur,
5707 xfs_fileoff_t startoff)
5709 struct xfs_mount *mp = ip->i_mount;
5710 struct xfs_bmbt_irec prev = *got;
5713 *logflags |= XFS_ILOG_CORE;
5715 got->br_startoff = startoff;
5718 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5721 if (XFS_IS_CORRUPT(mp, i != 1))
5722 return -EFSCORRUPTED;
5724 error = xfs_bmbt_update(cur, got);
5728 *logflags |= XFS_ILOG_DEXT;
5731 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5734 /* update reverse mapping */
5735 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5736 xfs_rmap_map_extent(tp, ip, whichfork, got);
5741 xfs_bmap_collapse_extents(
5742 struct xfs_trans *tp,
5743 struct xfs_inode *ip,
5744 xfs_fileoff_t *next_fsb,
5745 xfs_fileoff_t offset_shift_fsb,
5748 int whichfork = XFS_DATA_FORK;
5749 struct xfs_mount *mp = ip->i_mount;
5750 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5751 struct xfs_btree_cur *cur = NULL;
5752 struct xfs_bmbt_irec got, prev;
5753 struct xfs_iext_cursor icur;
5754 xfs_fileoff_t new_startoff;
5758 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5759 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5760 return -EFSCORRUPTED;
5763 if (xfs_is_shutdown(mp))
5766 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5768 error = xfs_iread_extents(tp, ip, whichfork);
5772 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5773 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5774 cur->bc_ino.flags = 0;
5777 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5781 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5782 error = -EFSCORRUPTED;
5786 new_startoff = got.br_startoff - offset_shift_fsb;
5787 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5788 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5793 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5794 error = xfs_bmse_merge(tp, ip, whichfork,
5795 offset_shift_fsb, &icur, &got, &prev,
5802 if (got.br_startoff < offset_shift_fsb) {
5808 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5809 cur, &logflags, new_startoff);
5814 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5819 *next_fsb = got.br_startoff;
5822 xfs_btree_del_cursor(cur, error);
5824 xfs_trans_log_inode(tp, ip, logflags);
5828 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5830 xfs_bmap_can_insert_extents(
5831 struct xfs_inode *ip,
5833 xfs_fileoff_t shift)
5835 struct xfs_bmbt_irec got;
5839 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5841 if (xfs_is_shutdown(ip->i_mount))
5844 xfs_ilock(ip, XFS_ILOCK_EXCL);
5845 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5846 if (!error && !is_empty && got.br_startoff >= off &&
5847 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5849 xfs_iunlock(ip, XFS_ILOCK_EXCL);
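/*
 * Illustrative aside: the overflow test in xfs_bmap_can_insert_extents()
 * relies on the on-disk startoff field being narrower than 64 bits, so
 * adding the shift and masking back to the field width wraps around (and
 * therefore compares lower) whenever the shifted offset would not fit on
 * disk.  A minimal userspace sketch, with an illustrative 54-bit mask
 * standing in for BMBT_STARTOFF_MASK:
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

#define STARTOFF_MASK	((1ULL << 54) - 1)	/* illustrative field width */

static bool shift_would_overflow(uint64_t startoff, uint64_t shift)
{
	/* wraps below startoff when startoff + shift exceeds the field */
	return ((startoff + shift) & STARTOFF_MASK) < startoff;
}
#endif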
5855 xfs_bmap_insert_extents(
5856 struct xfs_trans *tp,
5857 struct xfs_inode *ip,
5858 xfs_fileoff_t *next_fsb,
5859 xfs_fileoff_t offset_shift_fsb,
5861 xfs_fileoff_t stop_fsb)
5863 int whichfork = XFS_DATA_FORK;
5864 struct xfs_mount *mp = ip->i_mount;
5865 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5866 struct xfs_btree_cur *cur = NULL;
5867 struct xfs_bmbt_irec got, next;
5868 struct xfs_iext_cursor icur;
5869 xfs_fileoff_t new_startoff;
5873 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5874 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5875 return -EFSCORRUPTED;
5878 if (xfs_is_shutdown(mp))
5881 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5883 error = xfs_iread_extents(tp, ip, whichfork);
5887 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5888 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5889 cur->bc_ino.flags = 0;
5892 if (*next_fsb == NULLFSBLOCK) {
5893 xfs_iext_last(ifp, &icur);
5894 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5895 stop_fsb > got.br_startoff) {
5900 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5905 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5906 error = -EFSCORRUPTED;
5910 if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5911 error = -EFSCORRUPTED;
5915 new_startoff = got.br_startoff + offset_shift_fsb;
5916 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5917 if (new_startoff + got.br_blockcount > next.br_startoff) {
5923 * Unlike a left shift (which involves a hole punch), a right
5924 * shift does not modify extent neighbors in any way. We should
5925 * never find mergeable extents in this scenario. Check anyway
5926 * and warn if we encounter two extents that could be one.
5928 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5932 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5933 cur, &logflags, new_startoff);
5937 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5938 stop_fsb >= got.br_startoff + got.br_blockcount) {
5943 *next_fsb = got.br_startoff;
5946 xfs_btree_del_cursor(cur, error);
5948 xfs_trans_log_inode(tp, ip, logflags);
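/*
 * Illustrative aside: collapse and insert drive the same
 * xfs_bmap_shift_update_extent() helper in opposite directions - a collapse
 * (left shift) subtracts the shift from the extent's file offset, an insert
 * (right shift) adds it, and only the collapse case can ever produce a
 * mergeable neighbour.  A minimal userspace sketch of the offset update;
 * the names are illustrative:
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

static uint64_t shifted_startoff(uint64_t startoff, uint64_t shift,
				 bool insert_range)
{
	/* right shift for insert-range, left shift for collapse-range */
	return insert_range ? startoff + shift : startoff - shift;
}
#endif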
5953 * Splits the extent containing split_fsb into two extents, such that
5954 * split_fsb becomes the first block of the new (second) extent.
5955 * If split_fsb lies in a hole or at the first block of an extent,
5956 * just return 0.
5959 xfs_bmap_split_extent(
5960 struct xfs_trans *tp,
5961 struct xfs_inode *ip,
5962 xfs_fileoff_t split_fsb)
5964 int whichfork = XFS_DATA_FORK;
5965 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
5966 struct xfs_btree_cur *cur = NULL;
5967 struct xfs_bmbt_irec got;
5968 struct xfs_bmbt_irec new; /* split extent */
5969 struct xfs_mount *mp = ip->i_mount;
5970 xfs_fsblock_t gotblkcnt; /* new block count for got */
5971 struct xfs_iext_cursor icur;
5976 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5977 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5978 return -EFSCORRUPTED;
5981 if (xfs_is_shutdown(mp))
5984 /* Read in all the extents */
5985 error = xfs_iread_extents(tp, ip, whichfork);
5990 * If there are no extents, or split_fsb lies in a hole, we are done.
5992 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5993 got.br_startoff >= split_fsb)
5996 gotblkcnt = split_fsb - got.br_startoff;
5997 new.br_startoff = split_fsb;
5998 new.br_startblock = got.br_startblock + gotblkcnt;
5999 new.br_blockcount = got.br_blockcount - gotblkcnt;
6000 new.br_state = got.br_state;
6002 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6003 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6004 cur->bc_ino.flags = 0;
6005 error = xfs_bmbt_lookup_eq(cur, &got, &i);
6008 if (XFS_IS_CORRUPT(mp, i != 1)) {
6009 error = -EFSCORRUPTED;
6014 got.br_blockcount = gotblkcnt;
6015 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6018 logflags = XFS_ILOG_CORE;
6020 error = xfs_bmbt_update(cur, &got);
6024 logflags |= XFS_ILOG_DEXT;
6026 /* Add new extent */
6027 xfs_iext_next(ifp, &icur);
6028 xfs_iext_insert(ip, &icur, &new, 0);
6032 error = xfs_bmbt_lookup_eq(cur, &new, &i);
6035 if (XFS_IS_CORRUPT(mp, i != 0)) {
6036 error = -EFSCORRUPTED;
6039 error = xfs_btree_insert(cur, &i);
6042 if (XFS_IS_CORRUPT(mp, i != 1)) {
6043 error = -EFSCORRUPTED;
6049 * Convert to a btree if necessary.
6051 if (xfs_bmap_needs_btree(ip, whichfork)) {
6052 int tmp_logflags; /* partial log flag return val */
6054 ASSERT(cur == NULL);
6055 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6056 &tmp_logflags, whichfork);
6057 logflags |= tmp_logflags;
6062 cur->bc_ino.allocated = 0;
6063 xfs_btree_del_cursor(cur, error);
6067 xfs_trans_log_inode(tp, ip, logflags);
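/*
 * Illustrative aside: the actual split arithmetic in xfs_bmap_split_extent()
 * is simple - the original extent keeps the blocks before split_fsb, and a
 * new extent starts at split_fsb with the remainder, at the correspondingly
 * advanced disk block.  A minimal userspace sketch; the names are
 * illustrative:
 */
#if 0	/* example only */
#include <stdint.h>

struct extent {
	uint64_t startoff;	/* file offset, in blocks */
	uint64_t startblock;	/* disk block */
	uint64_t blockcount;	/* length in blocks */
};

/* split *got at file offset split_fsb; the tail is returned in *new_ext */
static void split_extent(struct extent *got, struct extent *new_ext,
			 uint64_t split_fsb)
{
	uint64_t head_len = split_fsb - got->startoff;

	new_ext->startoff = split_fsb;
	new_ext->startblock = got->startblock + head_len;
	new_ext->blockcount = got->blockcount - head_len;

	got->blockcount = head_len;
}
#endif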
6071 /* Deferred mapping is only for real extents in the data fork. */
6073 xfs_bmap_is_update_needed(
6074 struct xfs_bmbt_irec *bmap)
6076 return bmap->br_startblock != HOLESTARTBLOCK &&
6077 bmap->br_startblock != DELAYSTARTBLOCK;
6080 /* Record a bmap intent. */
6083 struct xfs_trans *tp,
6084 enum xfs_bmap_intent_type type,
6085 struct xfs_inode *ip,
6087 struct xfs_bmbt_irec *bmap)
6089 struct xfs_bmap_intent *bi;
6091 trace_xfs_bmap_defer(tp->t_mountp,
6092 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6094 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6095 ip->i_ino, whichfork,
6097 bmap->br_blockcount,
6100 bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6101 INIT_LIST_HEAD(&bi->bi_list);
6104 bi->bi_whichfork = whichfork;
6105 bi->bi_bmap = *bmap;
6107 xfs_bmap_update_get_group(tp->t_mountp, bi);
6108 xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
6112 /* Map an extent into a file. */
6114 xfs_bmap_map_extent(
6115 struct xfs_trans *tp,
6116 struct xfs_inode *ip,
6117 struct xfs_bmbt_irec *PREV)
6119 if (!xfs_bmap_is_update_needed(PREV))
6122 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6125 /* Unmap an extent out of a file. */
6127 xfs_bmap_unmap_extent(
6128 struct xfs_trans *tp,
6129 struct xfs_inode *ip,
6130 struct xfs_bmbt_irec *PREV)
6132 if (!xfs_bmap_is_update_needed(PREV))
6135 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6139 * Process one of the deferred bmap operations. We pass back the
6140 * btree cursor to maintain our lock on the bmapbt between calls.
6143 xfs_bmap_finish_one(
6144 struct xfs_trans *tp,
6145 struct xfs_bmap_intent *bi)
6147 struct xfs_bmbt_irec *bmap = &bi->bi_bmap;
6150 ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6152 trace_xfs_bmap_deferred(tp->t_mountp,
6153 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6155 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6156 bi->bi_owner->i_ino, bi->bi_whichfork,
6157 bmap->br_startoff, bmap->br_blockcount,
6160 if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
6161 return -EFSCORRUPTED;
6163 if (XFS_TEST_ERROR(false, tp->t_mountp,
6164 XFS_ERRTAG_BMAP_FINISH_ONE))
6167 switch (bi->bi_type) {
6169 error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6170 bmap->br_blockcount, bmap->br_startblock, 0);
6171 bmap->br_blockcount = 0;
6173 case XFS_BMAP_UNMAP:
6174 error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6175 &bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
6179 error = -EFSCORRUPTED;
6185 /* Check that an extent does not have invalid flags or bad ranges. */
6187 xfs_bmap_validate_extent_raw(
6188 struct xfs_mount *mp,
6191 struct xfs_bmbt_irec *irec)
6193 if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6194 return __this_address;
6196 if (rtfile && whichfork == XFS_DATA_FORK) {
6197 if (!xfs_verify_rtbext(mp, irec->br_startblock,
6198 irec->br_blockcount))
6199 return __this_address;
6201 if (!xfs_verify_fsbext(mp, irec->br_startblock,
6202 irec->br_blockcount))
6203 return __this_address;
6205 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6206 return __this_address;
6211 xfs_bmap_intent_init_cache(void)
6213 xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6214 sizeof(struct xfs_bmap_intent),
6217 return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6221 xfs_bmap_intent_destroy_cache(void)
6223 kmem_cache_destroy(xfs_bmap_intent_cache);
6224 xfs_bmap_intent_cache = NULL;
6227 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6229 xfs_bmap_validate_extent(
6230 struct xfs_inode *ip,
6232 struct xfs_bmbt_irec *irec)
6234 return xfs_bmap_validate_extent_raw(ip->i_mount,
6235 XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6239 * Used in xfs_itruncate_extents(). This is the maximum number of extents
6240 * freed from a file in a single transaction.
6242 #define XFS_ITRUNC_MAX_EXTENTS 2
6245 * Unmap every extent in part of an inode's fork. We don't do any higher level
6246 * invalidation work at all.
6250 struct xfs_trans **tpp,
6251 struct xfs_inode *ip,
6253 xfs_fileoff_t startoff,
6254 xfs_fileoff_t endoff)
6256 xfs_filblks_t unmap_len = endoff - startoff + 1;
6259 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6261 while (unmap_len > 0) {
6262 ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6263 error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6264 XFS_ITRUNC_MAX_EXTENTS);
6268 /* free the just unmapped extents */
6269 error = xfs_defer_finish(tpp);