// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count,
	 * either a signed 32-bit number for the data fork, or a signed 16-bit
	 * number for the attr fork.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
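
/*
 * Worked example (illustrative only; the exact figures depend on block
 * size and the computed min/max record counts): on a hypothetical 4k-block
 * filesystem where m_bmap_dmnr[0] and m_bmap_dmnr[1] are both about 127,
 * the data fork limit of MAXEXTNUM (2^31 - 1) extents needs roughly
 * 2^31 / 127 ~ 16.9M half-full leaves, then ~133k, ~1050 and ~9 node
 * blocks at successive levels, and finally a root that fits in the inode,
 * giving m_bm_maxlevels[XFS_DATA_FORK] of about 5.
 */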

unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
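
/*
 * For illustration (numbers assumed, not from this file): each extent
 * record costs sizeof(struct xfs_bmbt_rec) == 16 bytes of inline fork
 * space, so a fork with, say, 48 bytes of inline room has
 * XFS_IFORK_MAXEXT() == 3.  Adding a fourth mapping to such a fork makes
 * xfs_bmap_needs_btree() true and triggers the extents-to-btree
 * conversion below, while punching back down to three extents makes
 * xfs_bmap_wants_extents() true and allows the reverse conversion.
 */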

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
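
/*
 * Worked example (record counts assumed for illustration): with
 * m_bmap_dmxr[0] == 127 and m_bmap_dmxr[1] == 127, a delayed extent of
 * len == 1024 costs ceil(1024/127) == 9 leaf blocks at level 0, then
 * ceil(9/127) == 1 node block at level 1; since len has collapsed to 1
 * the remaining levels cost one block each, so the routine returns
 * 9 + 1 + (maxlevels - 2) indirect blocks.
 */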

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j; /* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;	/* new element */
#ifdef DEBUG
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_cache_alloc(xfs_bmap_free_item_zone,
			       GFP_KERNEL | __GFP_NOFAIL);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
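
/*
 * Typical use (sketch; the btree-collapse path below is one real caller):
 * a block unmapped inside a transaction is not freed synchronously but
 * queued so the free happens in a later deferred-op roll of the same
 * transaction chain, e.g.:
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, fsbno, 1, &oinfo);
 *
 * where xfs_bmap_add_free() is the non-discard-skipping wrapper around
 * this function.
 */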

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	**curp,	/* cursor returned to caller */
	int			wasdel,	/* converting a delayed alloc */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
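
/*
 * Shape of the result (illustrative): an inode with, say, four incore
 * mappings converts to
 *
 *	inode root (bb_level == 1, one key/pointer)
 *	    |
 *	    v
 *	child block (bb_level == 0, four bmbt records)
 *
 * i.e. a two-level tree whose single root record points at the one
 * allocated leaf holding every non-delalloc extent.
 */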

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
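
/*
 * Sketch of an init_fn callout (hypothetical name, for illustration;
 * xfs_symlink_local_to_remote used below is a real one): it stamps the
 * new block, copies the inline payload out of the fork, and logs the
 * range it wrote, e.g.:
 *
 *	static void
 *	example_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
 *			struct xfs_inode *ip, struct xfs_ifork *ifp)
 *	{
 *		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
 *		memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
 *		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *	}
 *
 * Real callouts may also write a block header and set b_ops, which is
 * why all logging is left to them.
 */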

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	xfs_mount_t		*mp;	/* file system mount struct */
	int			stat;	/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* bmap btree cursor */
	int			error;	/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatting callout...
 */
STATIC int				/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			*flags)	/* inode logging flags */
{
	struct xfs_da_args	dargs;	/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);

	ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
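
/*
 * Usage sketch (illustrative): a caller about to set the first extended
 * attribute on an inode does roughly
 *
 *	if (!XFS_IFORK_Q(ip)) {
 *		error = xfs_bmap_add_attrfork(ip, attr_space, rsvd);
 *		if (error)
 *			return error;
 *	}
 *
 * with the inode unlocked and no transaction held, as required above;
 * the attr set path follows this pattern ("attr_space" here is just a
 * placeholder for the space the new attribute needs).
 */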

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}
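
/*
 * Callers hold the ILOCK exclusively and typically do (sketch):
 *
 *	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
 *	if (error)
 *		return error;
 *	(then walk the fork with for_each_xfs_iext())
 *
 * xfs_need_iread_extents() makes the call idempotent, so it is cheap to
 * issue before any extent lookup, as the functions below do.
 */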

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of the fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
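
/*
 * Example (illustrative): with *first_unused == 0, len == 2 and mappings
 * at file offsets [0, 5) and [6, 10), the hole at offset 5 is only one
 * block, so the loop runs off the last extent and *first_unused becomes
 * 10, the first block past EOF; with len == 1 the same fork returns 5.
 */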

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	default:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_ino.allocated;
		bma->cur->bc_ino.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
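
/*
 * Note on the case analysis above (informal): LEFT/RIGHT_FILLING say
 * whether the real allocation reaches the left/right edge of the old
 * delalloc record, and LEFT/RIGHT_CONTIG say whether it can merge with
 * the neighbor on that side. For example, state == (BMAP_LEFT_FILLING |
 * BMAP_RIGHT_FILLING) means the whole delalloc extent was converted in
 * place with no merge, so only PREV's startblock and state change; the
 * seven "impossible" cases are those claiming a merge on a side the
 * allocation never touches.
 */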

/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	default:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		old = PREV;
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			old.br_startoff + old.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = PREV.br_state;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &r[1], state);
		xfs_iext_insert(ip, icur, &r[0], state);
		ifp->if_nextents += 2;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new right extent - oldext */
			error = xfs_bmbt_update(cur, &r[1]);
			if (error)
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new middle extent - newext */
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2478 case BMAP_LEFT_CONTIG:
2479 case BMAP_RIGHT_CONTIG:
2481 * These cases are all impossible.
2486 /* update reverse mappings */
2487 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2489 /* convert to a btree if necessary */
2490 if (xfs_bmap_needs_btree(ip, whichfork)) {
2491 int tmp_logflags; /* partial log flag return val */
2493 ASSERT(cur == NULL);
2494 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2495 &tmp_logflags, whichfork);
2496 *logflagsp |= tmp_logflags;
2501 /* clear out the allocated field, done with it now in any case. */
2503 cur->bc_ino.allocated = 0;
2507 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2517 * Convert a hole to a delayed allocation.
2520 xfs_bmap_add_extent_hole_delay(
2521 xfs_inode_t *ip, /* incore inode pointer */
2523 struct xfs_iext_cursor *icur,
2524 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2526 struct xfs_ifork *ifp; /* inode fork pointer */
2527 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2528 xfs_filblks_t newlen=0; /* new indirect size */
2529 xfs_filblks_t oldlen=0; /* old indirect size */
2530 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2531 int state = xfs_bmap_fork_to_state(whichfork);
2532 xfs_filblks_t temp; /* temp for indirect calculations */
2534 ifp = XFS_IFORK_PTR(ip, whichfork);
2535 ASSERT(isnullstartblock(new->br_startblock));
2538 * Check and set flags if this segment has a left neighbor
2540 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2541 state |= BMAP_LEFT_VALID;
2542 if (isnullstartblock(left.br_startblock))
2543 state |= BMAP_LEFT_DELAY;
2547 * Check and set flags if the current (right) segment exists.
2548 * If it doesn't exist, we're converting the hole at end-of-file.
2550 if (xfs_iext_get_extent(ifp, icur, &right)) {
2551 state |= BMAP_RIGHT_VALID;
2552 if (isnullstartblock(right.br_startblock))
2553 state |= BMAP_RIGHT_DELAY;
2557 * Set contiguity flags on the left and right neighbors.
2558 * Don't let extents get too large, even if the pieces are contiguous.
2560 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2561 left.br_startoff + left.br_blockcount == new->br_startoff &&
2562 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2563 state |= BMAP_LEFT_CONTIG;
2565 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2566 new->br_startoff + new->br_blockcount == right.br_startoff &&
2567 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2568 (!(state & BMAP_LEFT_CONTIG) ||
2569 (left.br_blockcount + new->br_blockcount +
2570 right.br_blockcount <= MAXEXTLEN)))
2571 state |= BMAP_RIGHT_CONTIG;
2574 * Switch out based on the contiguity flags.
2576 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2577 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2579 * New allocation is contiguous with delayed allocations
2580 * on the left and on the right.
2581 * Merge all three into a single extent record.
2583 temp = left.br_blockcount + new->br_blockcount +
2584 right.br_blockcount;
2586 oldlen = startblockval(left.br_startblock) +
2587 startblockval(new->br_startblock) +
2588 startblockval(right.br_startblock);
2589 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2591 left.br_startblock = nullstartblock(newlen);
2592 left.br_blockcount = temp;
2594 xfs_iext_remove(ip, icur, state);
2595 xfs_iext_prev(ifp, icur);
2596 xfs_iext_update_extent(ip, state, icur, &left);
2599 case BMAP_LEFT_CONTIG:
2601 * New allocation is contiguous with a delayed allocation on the left.
2603 * Merge the new allocation with the left neighbor.
2605 temp = left.br_blockcount + new->br_blockcount;
2607 oldlen = startblockval(left.br_startblock) +
2608 startblockval(new->br_startblock);
2609 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2611 left.br_blockcount = temp;
2612 left.br_startblock = nullstartblock(newlen);
2614 xfs_iext_prev(ifp, icur);
2615 xfs_iext_update_extent(ip, state, icur, &left);
2618 case BMAP_RIGHT_CONTIG:
2620 * New allocation is contiguous with a delayed allocation on the right.
2622 * Merge the new allocation with the right neighbor.
2624 temp = new->br_blockcount + right.br_blockcount;
2625 oldlen = startblockval(new->br_startblock) +
2626 startblockval(right.br_startblock);
2627 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2629 right.br_startoff = new->br_startoff;
2630 right.br_startblock = nullstartblock(newlen);
2631 right.br_blockcount = temp;
2632 xfs_iext_update_extent(ip, state, icur, &right);
2637 * New allocation is not contiguous with another
2638 * delayed allocation.
2639 * Insert a new entry.
2641 oldlen = newlen = 0;
2642 xfs_iext_insert(ip, icur, new, state);
2645 if (oldlen != newlen) {
2646 ASSERT(oldlen > newlen);
2647 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2650 * Nothing to do for disk quota accounting here.
2652 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
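/*
 * Worked example of the reservation trim above (hypothetical numbers):
 * merging a left delalloc extent of 100 blocks holding 4 indirect
 * blocks with a new 50 block reservation holding 3 gives oldlen = 7,
 * while the worst case indirect count for the combined 150 block
 * extent may be only 5; newlen is then 5, the 2 surplus blocks go back
 * to the free pool via xfs_mod_fdblocks() and the delalloc counter is
 * reduced by the same amount via xfs_mod_delalloc().
 */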
2657 * Convert a hole to a real allocation.
2659 STATIC int /* error */
2660 xfs_bmap_add_extent_hole_real(
2661 struct xfs_trans *tp,
2662 struct xfs_inode *ip,
2664 struct xfs_iext_cursor *icur,
2665 struct xfs_btree_cur **curp,
2666 struct xfs_bmbt_irec *new,
2670 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2671 struct xfs_mount *mp = ip->i_mount;
2672 struct xfs_btree_cur *cur = *curp;
2673 int error; /* error return value */
2674 int i; /* temp state */
2675 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2676 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2677 int rval=0; /* return value (logging flags) */
2678 int state = xfs_bmap_fork_to_state(whichfork);
2679 struct xfs_bmbt_irec old;
2681 ASSERT(!isnullstartblock(new->br_startblock));
2682 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
2684 XFS_STATS_INC(mp, xs_add_exlist);
2687 * Check and set flags if this segment has a left neighbor.
2689 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2690 state |= BMAP_LEFT_VALID;
2691 if (isnullstartblock(left.br_startblock))
2692 state |= BMAP_LEFT_DELAY;
2696 * Check and set flags if this segment has a current value.
2697 * Not true if we're inserting into the "hole" at eof.
2699 if (xfs_iext_get_extent(ifp, icur, &right)) {
2700 state |= BMAP_RIGHT_VALID;
2701 if (isnullstartblock(right.br_startblock))
2702 state |= BMAP_RIGHT_DELAY;
2706 * We're inserting a real allocation between "left" and "right".
2707 * Set the contiguity flags. Don't let extents get too large.
2709 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2710 left.br_startoff + left.br_blockcount == new->br_startoff &&
2711 left.br_startblock + left.br_blockcount == new->br_startblock &&
2712 left.br_state == new->br_state &&
2713 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2714 state |= BMAP_LEFT_CONTIG;
2716 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2717 new->br_startoff + new->br_blockcount == right.br_startoff &&
2718 new->br_startblock + new->br_blockcount == right.br_startblock &&
2719 new->br_state == right.br_state &&
2720 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2721 (!(state & BMAP_LEFT_CONTIG) ||
2722 left.br_blockcount + new->br_blockcount +
2723 right.br_blockcount <= MAXEXTLEN))
2724 state |= BMAP_RIGHT_CONTIG;
2728 * Select which case we're in here, and implement it.
2730 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2731 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2733 * New allocation is contiguous with real allocations on the
2734 * left and on the right.
2735 * Merge all three into a single extent record.
2737 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2739 xfs_iext_remove(ip, icur, state);
2740 xfs_iext_prev(ifp, icur);
2741 xfs_iext_update_extent(ip, state, icur, &left);
2745 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2747 rval = XFS_ILOG_CORE;
2748 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2751 if (XFS_IS_CORRUPT(mp, i != 1)) {
2752 error = -EFSCORRUPTED;
2755 error = xfs_btree_delete(cur, &i);
2758 if (XFS_IS_CORRUPT(mp, i != 1)) {
2759 error = -EFSCORRUPTED;
2762 error = xfs_btree_decrement(cur, 0, &i);
2765 if (XFS_IS_CORRUPT(mp, i != 1)) {
2766 error = -EFSCORRUPTED;
2769 error = xfs_bmbt_update(cur, &left);
2775 case BMAP_LEFT_CONTIG:
2777 * New allocation is contiguous with a real allocation on the left.
2779 * Merge the new allocation with the left neighbor.
2782 left.br_blockcount += new->br_blockcount;
2784 xfs_iext_prev(ifp, icur);
2785 xfs_iext_update_extent(ip, state, icur, &left);
2788 rval = xfs_ilog_fext(whichfork);
2791 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2794 if (XFS_IS_CORRUPT(mp, i != 1)) {
2795 error = -EFSCORRUPTED;
2798 error = xfs_bmbt_update(cur, &left);
2804 case BMAP_RIGHT_CONTIG:
2806 * New allocation is contiguous with a real allocation on the right.
2808 * Merge the new allocation with the right neighbor.
2812 right.br_startoff = new->br_startoff;
2813 right.br_startblock = new->br_startblock;
2814 right.br_blockcount += new->br_blockcount;
2815 xfs_iext_update_extent(ip, state, icur, &right);
2818 rval = xfs_ilog_fext(whichfork);
2821 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2824 if (XFS_IS_CORRUPT(mp, i != 1)) {
2825 error = -EFSCORRUPTED;
2828 error = xfs_bmbt_update(cur, &right);
2836 * New allocation is not contiguous with another real allocation.
2838 * Insert a new entry.
2840 xfs_iext_insert(ip, icur, new, state);
2844 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2846 rval = XFS_ILOG_CORE;
2847 error = xfs_bmbt_lookup_eq(cur, new, &i);
2850 if (XFS_IS_CORRUPT(mp, i != 0)) {
2851 error = -EFSCORRUPTED;
2854 error = xfs_btree_insert(cur, &i);
2857 if (XFS_IS_CORRUPT(mp, i != 1)) {
2858 error = -EFSCORRUPTED;
2865 /* add reverse mapping unless caller opted out */
2866 if (!(flags & XFS_BMAPI_NORMAP))
2867 xfs_rmap_map_extent(tp, ip, whichfork, new);
2869 /* convert to a btree if necessary */
2870 if (xfs_bmap_needs_btree(ip, whichfork)) {
2871 int tmp_logflags; /* partial log flag return val */
2873 ASSERT(cur == NULL);
2874 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2875 &tmp_logflags, whichfork);
2876 *logflagsp |= tmp_logflags;
2882 /* clear out the allocated field, done with it now in any case. */
2884 cur->bc_ino.allocated = 0;
2886 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2893 * Functions used in the extent read, allocate and remove paths
2897 * Adjust the size of the new extent based on i_extsize and rt extsize.
2900 xfs_bmap_extsize_align(
2902 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2903 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2904 xfs_extlen_t extsz, /* align to this extent size */
2905 int rt, /* is this a realtime inode? */
2906 int eof, /* is extent at end-of-file? */
2907 int delay, /* creating delalloc extent? */
2908 int convert, /* overwriting unwritten extent? */
2909 xfs_fileoff_t *offp, /* in/out: aligned offset */
2910 xfs_extlen_t *lenp) /* in/out: aligned length */
2912 xfs_fileoff_t orig_off; /* original offset */
2913 xfs_extlen_t orig_alen; /* original length */
2914 xfs_fileoff_t orig_end; /* original off+len */
2915 xfs_fileoff_t nexto; /* next file offset */
2916 xfs_fileoff_t prevo; /* previous file offset */
2917 xfs_fileoff_t align_off; /* temp for offset */
2918 xfs_extlen_t align_alen; /* temp for length */
2919 xfs_extlen_t temp; /* temp for calculations */
2924 orig_off = align_off = *offp;
2925 orig_alen = align_alen = *lenp;
2926 orig_end = orig_off + orig_alen;
2929 * If this request overlaps an existing extent, then don't
2930 * attempt to perform any additional alignment.
2932 if (!delay && !eof &&
2933 (orig_off >= gotp->br_startoff) &&
2934 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2939 * If the file offset is unaligned vs. the extent size
2940 * we need to align it. This will be possible unless
2941 * the file was previously written with a kernel that didn't
2942 * perform this alignment, or if a truncate shot us in the foot.
2945 div_u64_rem(orig_off, extsz, &temp);
2951 /* Same adjustment for the end of the requested area. */
2952 temp = (align_alen % extsz);
2954 align_alen += extsz - temp;
2957 * For large extent hint sizes, the aligned extent might be larger than
2958 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2959 * the length back under MAXEXTLEN. The outer allocation loops handle
2960 * short allocation just fine, so it is safe to do this. We only want to
2961 * do it when we are forced to, though, because it means more allocation
2962 * operations are required.
2964 while (align_alen > MAXEXTLEN)
2965 align_alen -= extsz;
2966 ASSERT(align_alen <= MAXEXTLEN);
2969 * If the previous block overlaps with this proposed allocation
2970 * then move the start forward without adjusting the length.
2972 if (prevp->br_startoff != NULLFILEOFF) {
2973 if (prevp->br_startblock == HOLESTARTBLOCK)
2974 prevo = prevp->br_startoff;
2976 prevo = prevp->br_startoff + prevp->br_blockcount;
2979 if (align_off != orig_off && align_off < prevo)
2982 * If the next block overlaps with this proposed allocation
2983 * then move the start back without adjusting the length,
2984 * but not before offset 0.
2985 * This may of course make the start overlap previous block,
2986 * and if we hit the offset 0 limit then the next block
2987 * can still overlap too.
2989 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2990 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2991 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2992 nexto = gotp->br_startoff + gotp->br_blockcount;
2994 nexto = gotp->br_startoff;
2996 nexto = NULLFILEOFF;
2998 align_off + align_alen != orig_end &&
2999 align_off + align_alen > nexto)
3000 align_off = nexto > align_alen ? nexto - align_alen : 0;
3002 * If we're now overlapping the next or previous extent that
3003 * means we can't fit an extsz piece in this hole. Just move
3004 * the start forward to the first valid spot and set
3005 * the length so we hit the end.
3007 if (align_off != orig_off && align_off < prevo)
3009 if (align_off + align_alen != orig_end &&
3010 align_off + align_alen > nexto &&
3011 nexto != NULLFILEOFF) {
3012 ASSERT(nexto > prevo);
3013 align_alen = nexto - align_off;
3017 * If realtime, and the result isn't a multiple of the realtime
3018 * extent size we need to remove blocks until it is.
3020 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3022 * We're not covering the original request, or
3023 * we won't be able to once we fix the length.
3025 if (orig_off < align_off ||
3026 orig_end > align_off + align_alen ||
3027 align_alen - temp < orig_alen)
3030 * Try to fix it by moving the start up.
3032 if (align_off + temp <= orig_off) {
3037 * Try to fix it by moving the end in.
3039 else if (align_off + align_alen - temp >= orig_end)
3042 * Set the start to the minimum then trim the length.
3045 align_alen -= orig_off - align_off;
3046 align_off = orig_off;
3047 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3050 * Result doesn't cover the request, fail it.
3052 if (orig_off < align_off || orig_end > align_off + align_alen)
3055 ASSERT(orig_off >= align_off);
3056 /* see MAXEXTLEN handling above */
3057 ASSERT(orig_end <= align_off + align_alen ||
3058 align_alen + extsz > MAXEXTLEN);
3062 if (!eof && gotp->br_startoff != NULLFILEOFF)
3063 ASSERT(align_off + align_alen <= gotp->br_startoff);
3064 if (prevp->br_startoff != NULLFILEOFF)
3065 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
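/*
 * Worked example of the alignment above (hypothetical numbers): with
 * extsz = 16, a request at offset 100 for 10 blocks first has its
 * start rounded down by the div_u64_rem() remainder (temp = 4), giving
 * align_off = 96 and align_alen = 14, and then its end rounded up by
 * extsz - (14 % 16) = 2, for a final range of offset 96, length 16
 * that fully covers the original request.
 */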
3073 #define XFS_ALLOC_GAP_UNITS 4
3075 STATIC void
3076 xfs_bmap_adjacent(
3077 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3079 xfs_fsblock_t adjust; /* adjustment to block numbers */
3080 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3081 xfs_mount_t *mp; /* mount point structure */
3082 int nullfb; /* true if ap->firstblock isn't set */
3083 int rt; /* true if inode is realtime */
3085 #define ISVALID(x,y) \
3087 (x) < mp->m_sb.sb_rblocks : \
3088 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3089 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3090 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3092 mp = ap->ip->i_mount;
3093 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3094 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3095 (ap->datatype & XFS_ALLOC_USERDATA);
3096 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3097 ap->tp->t_firstblock);
3099 * If allocating at eof, and there's a previous real block,
3100 * try to use its last block as our starting point.
3102 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3103 !isnullstartblock(ap->prev.br_startblock) &&
3104 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3105 ap->prev.br_startblock)) {
3106 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3108 * Adjust for the gap between prevp and us.
3110 adjust = ap->offset -
3111 (ap->prev.br_startoff + ap->prev.br_blockcount);
3113 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3114 ap->blkno += adjust;
3117 * If not at eof, then compare the two neighbor blocks.
3118 * Figure out whether either one gives us a good starting point,
3119 * and pick the better one.
3121 else if (!ap->eof) {
3122 xfs_fsblock_t gotbno; /* right side block number */
3123 xfs_fsblock_t gotdiff=0; /* right side difference */
3124 xfs_fsblock_t prevbno; /* left side block number */
3125 xfs_fsblock_t prevdiff=0; /* left side difference */
3128 * If there's a previous (left) block, select a requested
3129 * start block based on it.
3131 if (ap->prev.br_startoff != NULLFILEOFF &&
3132 !isnullstartblock(ap->prev.br_startblock) &&
3133 (prevbno = ap->prev.br_startblock +
3134 ap->prev.br_blockcount) &&
3135 ISVALID(prevbno, ap->prev.br_startblock)) {
3137 * Calculate gap to end of previous block.
3139 adjust = prevdiff = ap->offset -
3140 (ap->prev.br_startoff +
3141 ap->prev.br_blockcount);
3143 * Figure the startblock based on the previous block's
3144 * end and the gap size.
3146 * If the gap is large relative to the piece we're
3147 * allocating, or using it gives us an invalid block
3148 * number, then just use the end of the previous block.
3150 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3151 ISVALID(prevbno + prevdiff,
3152 ap->prev.br_startblock))
3157 * If the firstblock forbids it, can't use it, must use default.
3160 if (!rt && !nullfb &&
3161 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3162 prevbno = NULLFSBLOCK;
3165 * No previous block or can't follow it, just default.
3168 prevbno = NULLFSBLOCK;
3170 * If there's a following (right) block, select a requested
3171 * start block based on it.
3173 if (!isnullstartblock(ap->got.br_startblock)) {
3175 * Calculate gap to start of next block.
3177 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3179 * Figure the startblock based on the next block's
3180 * start and the gap size.
3182 gotbno = ap->got.br_startblock;
3185 * If the gap is large relative to the piece we're
3186 * allocating, or using it gives us an invalid block
3187 * number, then just use the start of the next block
3188 * offset by our length.
3190 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3191 ISVALID(gotbno - gotdiff, gotbno))
3193 else if (ISVALID(gotbno - ap->length, gotbno)) {
3194 gotbno -= ap->length;
3195 gotdiff += adjust - ap->length;
3199 * If the firstblock forbids it, can't use it, must use default.
3202 if (!rt && !nullfb &&
3203 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3204 gotbno = NULLFSBLOCK;
3207 * No next block, just default.
3210 gotbno = NULLFSBLOCK;
3212 * If both valid, pick the better one, else the only good
3213 * one, else ap->blkno is already set (to 0 or the inode block).
3215 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3216 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3217 else if (prevbno != NULLFSBLOCK)
3218 ap->blkno = prevbno;
3219 else if (gotbno != NULLFSBLOCK)
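/*
 * Example of the XFS_ALLOC_GAP_UNITS heuristic above (hypothetical
 * numbers): for an 8 block allocation whose file offset sits 20 blocks
 * past the end of the previous extent, the gap (20) is within
 * 4 * 8 = 32 blocks, so we request the previous extent's end plus 20.
 * That keeps the file-offset-to-disk-block delta of the two extents
 * equal, so a later allocation could merge them. A 200 block gap would
 * instead fall back to the block immediately after the previous extent.
 */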
3226 xfs_bmap_longest_free_extent(
3227 struct xfs_trans *tp,
3232 struct xfs_mount *mp = tp->t_mountp;
3233 struct xfs_perag *pag;
3234 xfs_extlen_t longest;
3237 pag = xfs_perag_get(mp, ag);
3238 if (!pag->pagf_init) {
3239 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3241 /* Couldn't lock the AGF, so skip this AG. */
3242 if (error == -EAGAIN) {
3250 longest = xfs_alloc_longest_free_extent(pag,
3251 xfs_alloc_min_freelist(mp, pag),
3252 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3253 if (*blen < longest)
3262 xfs_bmap_select_minlen(
3263 struct xfs_bmalloca *ap,
3264 struct xfs_alloc_arg *args,
3268 if (notinit || *blen < ap->minlen) {
3270 * Since we did a BUF_TRYLOCK above, it is possible that
3271 * there is space for this request.
3273 args->minlen = ap->minlen;
3274 } else if (*blen < args->maxlen) {
3276 * If the best seen length is less than the request length,
3277 * use the best as the minimum.
3279 args->minlen = *blen;
3282 * Otherwise we've seen an extent as big as maxlen, use that as the minimum.
3285 args->minlen = args->maxlen;
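/*
 * For example (hypothetical numbers): if the AG scan saw a best free
 * extent of 40 blocks against a 100 block request, minlen drops to 40
 * so xfs_alloc_vextent() can still succeed with a short allocation;
 * the caller's outer loop then maps the remainder in a later call.
 */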
3290 xfs_bmap_btalloc_nullfb(
3291 struct xfs_bmalloca *ap,
3292 struct xfs_alloc_arg *args,
3295 struct xfs_mount *mp = ap->ip->i_mount;
3296 xfs_agnumber_t ag, startag;
3300 args->type = XFS_ALLOCTYPE_START_BNO;
3301 args->total = ap->total;
3303 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3304 if (startag == NULLAGNUMBER)
3307 while (*blen < args->maxlen) {
3308 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3313 if (++ag == mp->m_sb.sb_agcount)
3319 xfs_bmap_select_minlen(ap, args, blen, notinit);
3324 xfs_bmap_btalloc_filestreams(
3325 struct xfs_bmalloca *ap,
3326 struct xfs_alloc_arg *args,
3329 struct xfs_mount *mp = ap->ip->i_mount;
3334 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3335 args->total = ap->total;
3337 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3338 if (ag == NULLAGNUMBER)
3341 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3345 if (*blen < args->maxlen) {
3346 error = xfs_filestream_new_ag(ap, &ag);
3350 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3357 xfs_bmap_select_minlen(ap, args, blen, notinit);
3360 * Set the failure fallback case to look in the selected AG as stream may move.
3363 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3367 /* Update all inode and quota accounting for the allocation we just did. */
3369 xfs_bmap_btalloc_accounting(
3370 struct xfs_bmalloca *ap,
3371 struct xfs_alloc_arg *args)
3373 if (ap->flags & XFS_BMAPI_COWFORK) {
3375 * COW fork blocks are in-core only and thus are treated as
3376 * in-core quota reservation (like delalloc blocks) even when
3377 * converted to real blocks. The quota reservation is not
3378 * accounted to disk until blocks are remapped to the data
3379 * fork. So if these blocks were previously delalloc, we
3380 * already have quota reservation and there's nothing to do yet.
3384 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3389 * Otherwise, we've allocated blocks in a hole. The transaction
3390 * has acquired in-core quota reservation for this extent.
3391 * Rather than account these as real blocks, however, we reduce
3392 * the transaction quota reservation based on the allocation.
3393 * This essentially transfers the transaction quota reservation
3394 * to that of a delalloc extent.
3396 ap->ip->i_delayed_blks += args->len;
3397 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3402 /* data/attr fork only */
3403 ap->ip->i_nblocks += args->len;
3404 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3406 ap->ip->i_delayed_blks -= args->len;
3407 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3409 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3410 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
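/*
 * Sketch of the wasdel path above (hypothetical numbers): converting a
 * 32 block delalloc reservation decrements i_delayed_blks and the
 * global delalloc counter by 32 and logs the quota change as
 * XFS_TRANS_DQ_DELBCOUNT, consuming the existing delayed reservation
 * instead of charging the blocks a second time; i_nblocks grows by 32
 * either way.
 */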
3415 xfs_bmap_compute_alignments(
3416 struct xfs_bmalloca *ap,
3417 struct xfs_alloc_arg *args)
3419 struct xfs_mount *mp = args->mp;
3420 xfs_extlen_t align = 0; /* minimum allocation alignment */
3421 int stripe_align = 0;
3423 /* stripe alignment for allocation is determined by mount parameters */
3424 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3425 stripe_align = mp->m_swidth;
3426 else if (mp->m_dalign)
3427 stripe_align = mp->m_dalign;
3429 if (ap->flags & XFS_BMAPI_COWFORK)
3430 align = xfs_get_cowextsz_hint(ap->ip);
3431 else if (ap->datatype & XFS_ALLOC_USERDATA)
3432 align = xfs_get_extsz_hint(ap->ip);
3434 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3435 ap->eof, 0, ap->conv, &ap->offset,
3441 /* apply extent size hints if obtained earlier */
3444 div_u64_rem(ap->offset, args->prod, &args->mod);
3446 args->mod = args->prod - args->mod;
3447 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3451 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3452 div_u64_rem(ap->offset, args->prod, &args->mod);
3454 args->mod = args->prod - args->mod;
3457 return stripe_align;
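/*
 * Sketch of the prod/mod trim above (hypothetical geometry): with 4k
 * filesystem blocks under 64k pages, args->prod is 16, so an offset of
 * 100 blocks yields args->mod = 16 - (100 % 16) = 12, which asks the
 * allocator for a length that makes offset + length end on a page
 * boundary.
 */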
3461 xfs_bmap_process_allocated_extent(
3462 struct xfs_bmalloca *ap,
3463 struct xfs_alloc_arg *args,
3464 xfs_fileoff_t orig_offset,
3465 xfs_extlen_t orig_length)
3469 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3472 * check the allocation happened at the same or higher AG than
3473 * the first block that was allocated.
3476 XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
3477 XFS_FSB_TO_AGNO(args->mp, args->fsbno));
3479 ap->blkno = args->fsbno;
3481 ap->tp->t_firstblock = args->fsbno;
3482 ap->length = args->len;
3484 * If the extent size hint is active, we tried to round the
3485 * caller's allocation request offset down to extsz and the
3486 * length up to another extsz boundary. If we found a free
3487 * extent we mapped it in starting at this new offset. If the
3488 * newly mapped space isn't long enough to cover any of the
3489 * range of offsets that was originally requested, move the
3490 * mapping up so that we can fill as much of the caller's
3491 * original request as possible. Free space is apparently
3492 * very fragmented so we're unlikely to be able to satisfy the hints anyway.
3495 if (ap->length <= orig_length)
3496 ap->offset = orig_offset;
3497 else if (ap->offset + ap->length < orig_offset + orig_length)
3498 ap->offset = orig_offset + orig_length - ap->length;
3499 xfs_bmap_btalloc_accounting(ap, args);
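/*
 * Example of the offset fixup above (hypothetical numbers): a request
 * for offset 100, length 10 that was aligned down to offset 96, length
 * 16 but only received 8 blocks would otherwise map [96, 104) and
 * satisfy just 4 of the caller's blocks; since ap->length (8) is no
 * larger than the original length (10), ap->offset is moved back to
 * 100 and the short allocation covers [100, 108) of the request.
 */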
3504 xfs_bmap_exact_minlen_extent_alloc(
3505 struct xfs_bmalloca *ap)
3507 struct xfs_mount *mp = ap->ip->i_mount;
3508 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3509 xfs_fileoff_t orig_offset;
3510 xfs_extlen_t orig_length;
3515 if (ap->minlen != 1) {
3516 ap->blkno = NULLFSBLOCK;
3521 orig_offset = ap->offset;
3522 orig_length = ap->length;
3524 args.alloc_minlen_only = 1;
3526 xfs_bmap_compute_alignments(ap, &args);
3528 if (ap->tp->t_firstblock == NULLFSBLOCK) {
3530 * Unlike the longest extent available in an AG, we don't track
3531 * the length of an AG's shortest extent.
3532 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3533 * hence we can afford to start traversing from the 0th AG since
3534 * we need not be concerned about a drop in performance in
3535 * "debug only" code paths.
3537 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3539 ap->blkno = ap->tp->t_firstblock;
3542 args.fsbno = ap->blkno;
3543 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3544 args.type = XFS_ALLOCTYPE_FIRST_AG;
3545 args.minlen = args.maxlen = ap->minlen;
3546 args.total = ap->total;
3549 args.minalignslop = 0;
3551 args.minleft = ap->minleft;
3552 args.wasdel = ap->wasdel;
3553 args.resv = XFS_AG_RESV_NONE;
3554 args.datatype = ap->datatype;
3556 error = xfs_alloc_vextent(&args);
3560 if (args.fsbno != NULLFSBLOCK) {
3561 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3564 ap->blkno = NULLFSBLOCK;
3572 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3576 STATIC int
3577 xfs_bmap_btalloc(
3578 struct xfs_bmalloca *ap)
3580 struct xfs_mount *mp = ap->ip->i_mount;
3581 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3582 xfs_alloctype_t atype = 0;
3583 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3585 xfs_fileoff_t orig_offset;
3586 xfs_extlen_t orig_length;
3588 xfs_extlen_t nextminlen = 0;
3589 int nullfb; /* true if ap->firstblock isn't set */
3596 orig_offset = ap->offset;
3597 orig_length = ap->length;
3599 stripe_align = xfs_bmap_compute_alignments(ap, &args);
3601 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3602 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3603 ap->tp->t_firstblock);
3605 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3606 xfs_inode_is_filestream(ap->ip)) {
3607 ag = xfs_filestream_lookup_ag(ap->ip);
3608 ag = (ag != NULLAGNUMBER) ? ag : 0;
3609 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3611 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3614 ap->blkno = ap->tp->t_firstblock;
3616 xfs_bmap_adjacent(ap);
3619 * If allowed, use ap->blkno; otherwise must use firstblock since
3620 * it's in the right allocation group.
3622 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3625 ap->blkno = ap->tp->t_firstblock;
3627 * Normal allocation, done through xfs_alloc_vextent.
3629 tryagain = isaligned = 0;
3630 args.fsbno = ap->blkno;
3631 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3633 /* Trim the allocation back to the maximum an AG can fit. */
3634 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3638 * Search for an allocation group with a single extent large
3639 * enough for the request. If one isn't found, then adjust
3640 * the minimum allocation size to the largest space found.
3642 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3643 xfs_inode_is_filestream(ap->ip))
3644 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3646 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3649 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3650 if (xfs_inode_is_filestream(ap->ip))
3651 args.type = XFS_ALLOCTYPE_FIRST_AG;
3653 args.type = XFS_ALLOCTYPE_START_BNO;
3654 args.total = args.minlen = ap->minlen;
3656 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3657 args.total = ap->total;
3658 args.minlen = ap->minlen;
3662 * If we are not low on available data blocks, and the underlying
3663 * logical volume manager is a stripe, and the file offset is zero then
3664 * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
3665 * is only set if the allocation length is >= the stripe unit and the
3666 * allocation offset is at the end of file.
3668 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3670 args.alignment = stripe_align;
3674 * Adjust minlen to try and preserve alignment if we
3675 * can't guarantee an aligned maxlen extent.
3677 if (blen > args.alignment &&
3678 blen <= args.maxlen + args.alignment)
3679 args.minlen = blen - args.alignment;
3680 args.minalignslop = 0;
3683 * First try an exact bno allocation.
3684 * If it fails then do a near or start bno
3685 * allocation with alignment turned on.
3689 args.type = XFS_ALLOCTYPE_THIS_BNO;
3692 * Compute the minlen+alignment for the
3693 * next case. Set slop so that the value
3694 * of minlen+alignment+slop doesn't go up
3695 * between the calls.
3697 if (blen > stripe_align && blen <= args.maxlen)
3698 nextminlen = blen - stripe_align;
3700 nextminlen = args.minlen;
3701 if (nextminlen + stripe_align > args.minlen + 1)
3703 nextminlen + stripe_align -
3706 args.minalignslop = 0;
3710 args.minalignslop = 0;
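/*
 * Worked example of the slop above (hypothetical numbers): with
 * stripe_align = 16 and args.minlen = 1, nextminlen stays 1 and
 * minalignslop becomes 1 + 16 - 1 - 1 = 15, so the exact THIS_BNO
 * attempt only succeeds when enough space remains for the aligned
 * retry to succeed as well, keeping minlen + alignment + slop from
 * rising between the two calls.
 */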
3712 args.minleft = ap->minleft;
3713 args.wasdel = ap->wasdel;
3714 args.resv = XFS_AG_RESV_NONE;
3715 args.datatype = ap->datatype;
3717 error = xfs_alloc_vextent(&args);
3721 if (tryagain && args.fsbno == NULLFSBLOCK) {
3723 * Exact allocation failed. Now try with alignment turned on.
3727 args.fsbno = ap->blkno;
3728 args.alignment = stripe_align;
3729 args.minlen = nextminlen;
3730 args.minalignslop = 0;
3732 if ((error = xfs_alloc_vextent(&args)))
3735 if (isaligned && args.fsbno == NULLFSBLOCK) {
3737 * allocation failed, so turn off alignment and try again.
3741 args.fsbno = ap->blkno;
3743 if ((error = xfs_alloc_vextent(&args)))
3746 if (args.fsbno == NULLFSBLOCK && nullfb &&
3747 args.minlen > ap->minlen) {
3748 args.minlen = ap->minlen;
3749 args.type = XFS_ALLOCTYPE_START_BNO;
3750 args.fsbno = ap->blkno;
3751 if ((error = xfs_alloc_vextent(&args)))
3754 if (args.fsbno == NULLFSBLOCK && nullfb) {
3756 args.type = XFS_ALLOCTYPE_FIRST_AG;
3757 args.total = ap->minlen;
3758 if ((error = xfs_alloc_vextent(&args)))
3760 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3763 if (args.fsbno != NULLFSBLOCK) {
3764 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3767 ap->blkno = NULLFSBLOCK;
3773 /* Trim extent to fit a logical block range. */
3774 void
3775 xfs_trim_extent(
3776 struct xfs_bmbt_irec *irec,
3780 xfs_fileoff_t distance;
3781 xfs_fileoff_t end = bno + len;
3783 if (irec->br_startoff + irec->br_blockcount <= bno ||
3784 irec->br_startoff >= end) {
3785 irec->br_blockcount = 0;
3789 if (irec->br_startoff < bno) {
3790 distance = bno - irec->br_startoff;
3791 if (isnullstartblock(irec->br_startblock))
3792 irec->br_startblock = DELAYSTARTBLOCK;
3793 if (irec->br_startblock != DELAYSTARTBLOCK &&
3794 irec->br_startblock != HOLESTARTBLOCK)
3795 irec->br_startblock += distance;
3796 irec->br_startoff += distance;
3797 irec->br_blockcount -= distance;
3800 if (end < irec->br_startoff + irec->br_blockcount) {
3801 distance = irec->br_startoff + irec->br_blockcount - end;
3802 irec->br_blockcount -= distance;
3807 * Trim the returned map to the required bounds
3809 STATIC void
3810 xfs_bmapi_trim_map(
3811 struct xfs_bmbt_irec *mval,
3812 struct xfs_bmbt_irec *got,
3820 if ((flags & XFS_BMAPI_ENTIRE) ||
3821 got->br_startoff + got->br_blockcount <= obno) {
3823 if (isnullstartblock(got->br_startblock))
3824 mval->br_startblock = DELAYSTARTBLOCK;
3830 ASSERT((*bno >= obno) || (n == 0));
3832 mval->br_startoff = *bno;
3833 if (isnullstartblock(got->br_startblock))
3834 mval->br_startblock = DELAYSTARTBLOCK;
3836 mval->br_startblock = got->br_startblock +
3837 (*bno - got->br_startoff);
3839 * Return the minimum of what we got and what we asked for, for
3840 * the length. We can use the len variable here because it is
3841 * modified below and we could have been there before coming
3842 * here if the first part of the allocation didn't overlap what
3843 * was asked for.
3845 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3846 got->br_blockcount - (*bno - got->br_startoff));
3847 mval->br_state = got->br_state;
3848 ASSERT(mval->br_blockcount <= len);
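/*
 * Example of the trim above (hypothetical numbers): with *bno = 105
 * and a got extent of startoff 100, startblock 5000, blockcount 20,
 * the returned mval starts at offset 105 with startblock 5005 and a
 * length capped at min(end - 105, 20 - 5) blocks.
 */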
3853 * Update and validate the extent map to return
3856 xfs_bmapi_update_map(
3857 struct xfs_bmbt_irec **map,
3865 xfs_bmbt_irec_t *mval = *map;
3867 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3868 ((mval->br_startoff + mval->br_blockcount) <= end));
3869 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3870 (mval->br_startoff < obno));
3872 *bno = mval->br_startoff + mval->br_blockcount;
3874 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3875 /* update previous map with new information */
3876 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3877 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3878 ASSERT(mval->br_state == mval[-1].br_state);
3879 mval[-1].br_blockcount = mval->br_blockcount;
3880 mval[-1].br_state = mval->br_state;
3881 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3882 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3883 mval[-1].br_startblock != HOLESTARTBLOCK &&
3884 mval->br_startblock == mval[-1].br_startblock +
3885 mval[-1].br_blockcount &&
3886 mval[-1].br_state == mval->br_state) {
3887 ASSERT(mval->br_startoff ==
3888 mval[-1].br_startoff + mval[-1].br_blockcount);
3889 mval[-1].br_blockcount += mval->br_blockcount;
3890 } else if (*n > 0 &&
3891 mval->br_startblock == DELAYSTARTBLOCK &&
3892 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3893 mval->br_startoff ==
3894 mval[-1].br_startoff + mval[-1].br_blockcount) {
3895 mval[-1].br_blockcount += mval->br_blockcount;
3896 mval[-1].br_state = mval->br_state;
3897 } else if (!((*n == 0) &&
3898 ((mval->br_startoff + mval->br_blockcount) <=
3899 obno))) {
3900 mval++;
3901 (*n)++;
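/*
 * The chain above merges the record at *map into its predecessor when
 * possible; e.g. (hypothetical numbers) a real extent at offset 100,
 * block 5000, length 4 followed by one at offset 104, block 5004,
 * length 6 in the same state collapses into a single 10 block mapping,
 * so callers see as few mval entries as possible.
 */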
3907 * Map file blocks to filesystem blocks without allocation.
3909 int
3910 xfs_bmapi_read(
3911 struct xfs_inode *ip,
3914 struct xfs_bmbt_irec *mval,
3918 struct xfs_mount *mp = ip->i_mount;
3919 int whichfork = xfs_bmapi_whichfork(flags);
3920 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3921 struct xfs_bmbt_irec got;
3924 struct xfs_iext_cursor icur;
3930 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3931 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3933 if (WARN_ON_ONCE(!ifp))
3934 return -EFSCORRUPTED;
3936 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3937 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
3938 return -EFSCORRUPTED;
3940 if (XFS_FORCED_SHUTDOWN(mp))
3943 XFS_STATS_INC(mp, xs_blk_mapr);
3945 error = xfs_iread_extents(NULL, ip, whichfork);
3949 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3954 while (bno < end && n < *nmap) {
3955 /* Reading past eof, act as though there's a hole up to end. */
3957 got.br_startoff = end;
3958 if (got.br_startoff > bno) {
3959 /* Reading in a hole. */
3960 mval->br_startoff = bno;
3961 mval->br_startblock = HOLESTARTBLOCK;
3962 mval->br_blockcount =
3963 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3964 mval->br_state = XFS_EXT_NORM;
3965 bno += mval->br_blockcount;
3966 len -= mval->br_blockcount;
3972 /* set up the extent map to return. */
3973 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3974 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3976 /* If we're done, stop now. */
3977 if (bno >= end || n >= *nmap)
3980 /* Else go on to the next record. */
3981 if (!xfs_iext_next_extent(ifp, &icur, &got))
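/*
 * A minimal caller sketch (hypothetical variables, not from this file):
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *	int			error;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &imap,
 *			&nimaps, 0);
 *
 * On success, imap describes a hole (HOLESTARTBLOCK), a delalloc
 * extent (DELAYSTARTBLOCK), or a real extent covering offset_fsb.
 */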
3989 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3990 * global pool and the extent inserted into the inode in-core extent tree.
3992 * On entry, got refers to the first extent beyond the offset of the extent to
3993 * allocate or eof is specified if no such extent exists. On return, got refers
3994 * to the extent record that was inserted to the inode fork.
3996 * Note that the allocated extent may have been merged with contiguous extents
3997 * during insertion into the inode fork. Thus, got does not reflect the current
3998 * state of the inode fork on return. If necessary, the caller can use icur to
3999 * look up the updated record in the inode fork.
4002 xfs_bmapi_reserve_delalloc(
4003 struct xfs_inode *ip,
4007 xfs_filblks_t prealloc,
4008 struct xfs_bmbt_irec *got,
4009 struct xfs_iext_cursor *icur,
4012 struct xfs_mount *mp = ip->i_mount;
4013 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4015 xfs_extlen_t indlen;
4017 xfs_fileoff_t aoff = off;
4020 * Cap the alloc length. Keep track of prealloc so we know whether to
4021 * tag the inode before we return.
4023 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4025 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4026 if (prealloc && alen >= len)
4027 prealloc = alen - len;
4029 /* Figure out the extent size, adjust alen */
4030 if (whichfork == XFS_COW_FORK) {
4031 struct xfs_bmbt_irec prev;
4032 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
4034 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4035 prev.br_startoff = NULLFILEOFF;
4037 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4038 1, 0, &aoff, &alen);
4043 * Make a transaction-less quota reservation for delayed allocation
4044 * blocks. This number gets adjusted later. If the reservation fails,
4045 * we can return immediately since we haven't allocated anything yet.
4047 error = xfs_quota_reserve_blkres(ip, alen);
4052 * Split changing sb for alen and indlen since they could be coming
4053 * from different places.
4055 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4058 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4060 goto out_unreserve_quota;
4062 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4064 goto out_unreserve_blocks;
4067 ip->i_delayed_blks += alen;
4068 xfs_mod_delalloc(ip->i_mount, alen + indlen);
4070 got->br_startoff = aoff;
4071 got->br_startblock = nullstartblock(indlen);
4072 got->br_blockcount = alen;
4073 got->br_state = XFS_EXT_NORM;
4075 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4078 * Tag the inode if blocks were preallocated. Note that COW fork
4079 * preallocation can occur at the start or end of the extent, even when
4080 * prealloc == 0, so we must also check the aligned offset and length.
4082 if (whichfork == XFS_DATA_FORK && prealloc)
4083 xfs_inode_set_eofblocks_tag(ip);
4084 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4085 xfs_inode_set_cowblocks_tag(ip);
4089 out_unreserve_blocks:
4090 xfs_mod_fdblocks(mp, alen, false);
4091 out_unreserve_quota:
4092 if (XFS_IS_QUOTA_ON(mp))
4093 xfs_quota_unreserve_blkres(ip, alen);
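/*
 * Example of the split reservation above (hypothetical numbers): a 128
 * block delalloc request whose worst case bmbt growth needs 7 indirect
 * blocks charges fdblocks twice (-128, then -7) so that failure of the
 * second charge can back out just the first; the indlen portion rides
 * in the extent itself via nullstartblock(7) until the extent is
 * converted or freed.
 */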
4098 xfs_bmap_alloc_userdata(
4099 struct xfs_bmalloca *bma)
4101 struct xfs_mount *mp = bma->ip->i_mount;
4102 int whichfork = xfs_bmapi_whichfork(bma->flags);
4106 * Set the data type being allocated. For the data fork, the first data
4107 * in the file is treated differently to all other allocations. For the
4108 * attribute fork, we only need to ensure the allocated range is not on the busy list.
4111 bma->datatype = XFS_ALLOC_NOBUSY;
4112 if (whichfork == XFS_DATA_FORK) {
4113 bma->datatype |= XFS_ALLOC_USERDATA;
4114 if (bma->offset == 0)
4115 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4117 if (mp->m_dalign && bma->length >= mp->m_dalign) {
4118 error = xfs_bmap_isaeof(bma, whichfork);
4123 if (XFS_IS_REALTIME_INODE(bma->ip))
4124 return xfs_bmap_rtalloc(bma);
4127 if (unlikely(XFS_TEST_ERROR(false, mp,
4128 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4129 return xfs_bmap_exact_minlen_extent_alloc(bma);
4131 return xfs_bmap_btalloc(bma);
4136 struct xfs_bmalloca *bma)
4138 struct xfs_mount *mp = bma->ip->i_mount;
4139 int whichfork = xfs_bmapi_whichfork(bma->flags);
4140 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4141 int tmp_logflags = 0;
4144 ASSERT(bma->length > 0);
4147 * For the wasdelay case, we could also just allocate the stuff asked
4148 * for in this bmap call but that wouldn't be as good.
4151 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4152 bma->offset = bma->got.br_startoff;
4153 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4154 bma->prev.br_startoff = NULLFILEOFF;
4156 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4158 bma->length = XFS_FILBLKS_MIN(bma->length,
4159 bma->got.br_startoff - bma->offset);
4162 if (bma->flags & XFS_BMAPI_CONTIG)
4163 bma->minlen = bma->length;
4167 if (bma->flags & XFS_BMAPI_METADATA) {
4168 if (unlikely(XFS_TEST_ERROR(false, mp,
4169 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4170 error = xfs_bmap_exact_minlen_extent_alloc(bma);
4172 error = xfs_bmap_btalloc(bma);
4174 error = xfs_bmap_alloc_userdata(bma);
4176 if (error || bma->blkno == NULLFSBLOCK)
4179 if (bma->flags & XFS_BMAPI_ZERO) {
4180 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4185 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4186 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4188 * Bump the number of extents we've allocated
4189 * in this call.
4191 bma->nallocs++;
4193 if (bma->cur)
4194 bma->cur->bc_ino.flags =
4195 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
4197 bma->got.br_startoff = bma->offset;
4198 bma->got.br_startblock = bma->blkno;
4199 bma->got.br_blockcount = bma->length;
4200 bma->got.br_state = XFS_EXT_NORM;
4202 if (bma->flags & XFS_BMAPI_PREALLOC)
4203 bma->got.br_state = XFS_EXT_UNWRITTEN;
4206 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4208 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4209 whichfork, &bma->icur, &bma->cur, &bma->got,
4210 &bma->logflags, bma->flags);
4212 bma->logflags |= tmp_logflags;
4217 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4218 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4219 * the neighbouring ones.
4221 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4223 ASSERT(bma->got.br_startoff <= bma->offset);
4224 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4225 bma->offset + bma->length);
4226 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4227 bma->got.br_state == XFS_EXT_UNWRITTEN);
4232 xfs_bmapi_convert_unwritten(
4233 struct xfs_bmalloca *bma,
4234 struct xfs_bmbt_irec *mval,
4238 int whichfork = xfs_bmapi_whichfork(flags);
4239 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4240 int tmp_logflags = 0;
4243 /* check if we need to do unwritten->real conversion */
4244 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4245 (flags & XFS_BMAPI_PREALLOC))
4248 /* check if we need to do real->unwritten conversion */
4249 if (mval->br_state == XFS_EXT_NORM &&
4250 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4251 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4255 * Modify (by adding) the state flag, if writing.
4257 ASSERT(mval->br_blockcount <= len);
4258 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4259 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4260 bma->ip, whichfork);
4262 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4263 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4266 * Before insertion into the bmbt, zero the range being converted if required.
4269 if (flags & XFS_BMAPI_ZERO) {
4270 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4271 mval->br_blockcount);
4276 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4277 &bma->icur, &bma->cur, mval, &tmp_logflags);
4279 * Log the inode core unconditionally in the unwritten extent conversion
4280 * path because the conversion might not have done so (e.g., if the
4281 * extent count hasn't changed). We need to make sure the inode is dirty
4282 * in the transaction for the sake of fsync(), even if nothing has
4283 * changed, because fsync() will not force the log for this transaction
4284 * unless it sees the inode pinned.
4286 * Note: If we're only converting cow fork extents, there aren't
4287 * any on-disk updates to make, so we don't need to log anything.
4289 if (whichfork != XFS_COW_FORK)
4290 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4295 * Update our extent pointer, given that
4296 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4297 * of the neighbouring ones.
4299 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4302 * We may have combined previously unwritten space with written space,
4303 * so generate another request.
4305 if (mval->br_blockcount < len)
4310 static inline xfs_extlen_t
4311 xfs_bmapi_minleft(
4312 struct xfs_trans *tp,
4313 struct xfs_inode *ip,
4316 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, fork);
4318 if (tp && tp->t_firstblock != NULLFSBLOCK)
4320 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4322 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
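/*
 * Example (hypothetical geometry): for a fork already in btree format
 * with a level 2 root, this returns 3, making allocations leave enough
 * free blocks behind for a full split of every bmbt level.
 */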
4326 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4327 * a case where the data is changed, there's an error, and it's not logged so we
4328 * don't shut down when we should. Don't bother logging extents/btree changes if
4329 * we converted to the other format.
4331 static void
4332 xfs_bmapi_finish(
4333 struct xfs_bmalloca *bma,
4337 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4339 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4340 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4341 bma->logflags &= ~xfs_ilog_fext(whichfork);
4342 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4343 ifp->if_format != XFS_DINODE_FMT_BTREE)
4344 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4347 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4349 xfs_btree_del_cursor(bma->cur, error);
4353 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4354 * extent state if necessary. Detailed behaviour is controlled by the flags
4355 * parameter. Only allocates blocks from a single allocation group, to avoid
4356 * locking problems.
4358 int
4359 xfs_bmapi_write(
4360 struct xfs_trans *tp, /* transaction pointer */
4361 struct xfs_inode *ip, /* incore inode */
4362 xfs_fileoff_t bno, /* starting file offs. mapped */
4363 xfs_filblks_t len, /* length to map in file */
4364 int flags, /* XFS_BMAPI_... */
4365 xfs_extlen_t total, /* total blocks needed */
4366 struct xfs_bmbt_irec *mval, /* output: map values */
4367 int *nmap) /* i/o: mval size/count */
4369 struct xfs_bmalloca bma = {
4374 struct xfs_mount *mp = ip->i_mount;
4375 int whichfork = xfs_bmapi_whichfork(flags);
4376 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4377 xfs_fileoff_t end; /* end of mapped file region */
4378 bool eof = false; /* after the end of extents */
4379 int error; /* error return */
4380 int n; /* current extent index */
4381 xfs_fileoff_t obno; /* old block number (offset) */
4384 xfs_fileoff_t orig_bno; /* original block number value */
4385 int orig_flags; /* original flags arg value */
4386 xfs_filblks_t orig_len; /* original value of len arg */
4387 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4388 int orig_nmap; /* original value of *nmap */
4398 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4401 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4402 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4403 ASSERT(!(flags & XFS_BMAPI_REMAP));
4405 /* zeroing is currently only for data extents, not metadata */
4406 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4407 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4409 * we can allocate unwritten extents or pre-zero allocated blocks,
4410 * but it makes no sense to do both at once. This would result in
4411 * zeroing the unwritten extent twice while it still remained an
4412 * unwritten extent.
4414 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4415 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4417 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4418 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4419 return -EFSCORRUPTED;
4422 if (XFS_FORCED_SHUTDOWN(mp))
4425 XFS_STATS_INC(mp, xs_blk_mapw);
4427 error = xfs_iread_extents(tp, ip, whichfork);
4431 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4433 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4434 bma.prev.br_startoff = NULLFILEOFF;
4435 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4440 while (bno < end && n < *nmap) {
4441 bool need_alloc = false, wasdelay = false;
4443 /* in hole or beyond EOF? */
4444 if (eof || bma.got.br_startoff > bno) {
4446 * CoW fork conversions should /never/ hit EOF or
4447 * holes. There should always be something for us to work on.
4450 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4451 (flags & XFS_BMAPI_COWFORK)));
4454 } else if (isnullstartblock(bma.got.br_startblock)) {
4459 * First, deal with the hole before the allocated space
4460 * that we found, if any.
4462 if (need_alloc || wasdelay) {
4464 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4465 bma.wasdel = wasdelay;
4470 * There's a 32/64 bit type mismatch between the
4471 * allocation length request (which can be 64 bits in
4472 * length) and the bma length request, which is
4473 * xfs_extlen_t and therefore 32 bits. Hence we have to
4474 * check for 32-bit overflows and handle them here.
4476 if (len > (xfs_filblks_t)MAXEXTLEN)
4477 bma.length = MAXEXTLEN;
4482 ASSERT(bma.length > 0);
4483 error = xfs_bmapi_allocate(&bma);
4486 if (bma.blkno == NULLFSBLOCK)
4490 * If this is a CoW allocation, record the data in
4491 * the refcount btree for orphan recovery.
4493 if (whichfork == XFS_COW_FORK)
4494 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4498 /* Deal with the allocated space we found. */
4499 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4502 /* Execute unwritten extent conversion if necessary */
4503 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4504 if (error == -EAGAIN)
4509 /* update the extent map to return */
4510 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4513 * If we're done, stop now. Stop when we've allocated
4514 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4515 * the transaction may get too big.
4517 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4520 /* Else go on to the next record. */
4522 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4527 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4532 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4533 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4534 xfs_bmapi_finish(&bma, whichfork, 0);
4535 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4539 xfs_bmapi_finish(&bma, whichfork, error);
4544 * Convert an existing delalloc extent to real blocks based on file offset. This
4545 * attempts to allocate the entire delalloc extent and may require multiple
4546 * invocations to allocate the target offset if a large enough physical extent is not available.
4550 xfs_bmapi_convert_delalloc(
4551 struct xfs_inode *ip,
4554 struct iomap *iomap,
4557 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4558 struct xfs_mount *mp = ip->i_mount;
4559 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4560 struct xfs_bmalloca bma = { NULL };
4562 struct xfs_trans *tp;
4565 if (whichfork == XFS_COW_FORK)
4566 flags |= IOMAP_F_SHARED;
4569 * Space for the extent and indirect blocks was reserved when the
4570 * delalloc extent was created so there's no need to do so here.
4572 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4573 XFS_TRANS_RESERVE, &tp);
4577 xfs_ilock(ip, XFS_ILOCK_EXCL);
4579 error = xfs_iext_count_may_overflow(ip, whichfork,
4580 XFS_IEXT_ADD_NOSPLIT_CNT);
4582 goto out_trans_cancel;
4584 xfs_trans_ijoin(tp, ip, 0);
4586 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4587 bma.got.br_startoff > offset_fsb) {
4589 * No extent found in the range we are trying to convert. This
4590 * should only happen for the COW fork, where another thread
4591 * might have moved the extent to the data fork in the meantime.
4593 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4595 goto out_trans_cancel;
4599 * If we find a real extent here we raced with another thread converting
4600 * the extent. Just return the real extent at this offset.
4602 if (!isnullstartblock(bma.got.br_startblock)) {
4603 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4604 *seq = READ_ONCE(ifp->if_seq);
4605 goto out_trans_cancel;
4611 bma.offset = bma.got.br_startoff;
4612 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4613 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4616 * When we're converting the delalloc reservations backing dirty pages
4617 * in the page cache, we must be careful about how we create the new
4620 * New CoW fork extents are created unwritten, turned into real extents
4621 * when we're about to write the data to disk, and mapped into the data
4622 * fork after the write finishes. End of story.
4624 * New data fork extents must be mapped in as unwritten and converted
4625 * to real extents after the write succeeds to avoid exposing stale
4626 * disk contents if we crash.
4628 bma.flags = XFS_BMAPI_PREALLOC;
4629 if (whichfork == XFS_COW_FORK)
4630 bma.flags |= XFS_BMAPI_COWFORK;
4632 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4633 bma.prev.br_startoff = NULLFILEOFF;
4635 error = xfs_bmapi_allocate(&bma);
4640 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4642 error = -EFSCORRUPTED;
4643 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4646 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4647 XFS_STATS_INC(mp, xs_xstrat_quick);
4649 ASSERT(!isnullstartblock(bma.got.br_startblock));
4650 xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4651 *seq = READ_ONCE(ifp->if_seq);
4653 if (whichfork == XFS_COW_FORK)
4654 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4656 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4661 xfs_bmapi_finish(&bma, whichfork, 0);
4662 error = xfs_trans_commit(tp);
4663 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4667 xfs_bmapi_finish(&bma, whichfork, error);
4669 xfs_trans_cancel(tp);
4670 xfs_iunlock(ip, XFS_ILOCK_EXCL);
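/*
 * A minimal caller sketch (illustrative only; the loop shape is an
 * assumption, not lifted from the actual writeback code): because each call
 * converts at most one allocation's worth of the reservation, a caller
 * retries until the returned mapping covers the offset it cares about:
 *
 *	do {
 *		error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
 *				&iomap, &seq);
 *		if (error)
 *			break;
 *	} while (offset >= iomap.offset + iomap.length);
 */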
int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			whichfork = xfs_bmapi_whichfork(flags);
	int			logflags = 0, error;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
			   XFS_BMAPI_NORMAP)));
	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	ip->i_nblocks += len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	if (flags & XFS_BMAPI_PREALLOC)
		got.br_state = XFS_EXT_UNWRITTEN;
	else
		got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
			&cur, &got, &logflags, flags);
	if (error)
		goto error0;
	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);

error0:
	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur)
		xfs_btree_del_cursor(cur, error);
	return error;
}
/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents.  If necessary, steal
 * available blocks from a deleted extent to make up a reservation deficiency
 * (e.g., if ores == 1).  The number of stolen blocks is returned.  The
 * availability and subsequent accounting of stolen blocks is the
 * responsibility of the caller.
 */
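/*
 * Worked example (hypothetical numbers, for illustration only): with ores = 5,
 * *indlen1 = 4, *indlen2 = 3 and avail = 0, the new total nres = 7 exceeds
 * ores, so resfactor = 500 / 7 = 71.  Scaling gives len1 = 4 * 71 / 100 = 2
 * and len2 = 3 * 71 / 100 = 2, and the hand-out loop below gives the single
 * remaining block to len1.  The final 3/2 split exactly consumes ores.
 */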
static xfs_filblks_t
xfs_bmap_split_indlen(
	xfs_filblks_t			ores,		/* original res. */
	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
	xfs_filblks_t			avail)		/* stealable blocks */
{
	xfs_filblks_t			len1 = *indlen1;
	xfs_filblks_t			len2 = *indlen2;
	xfs_filblks_t			nres = len1 + len2; /* new total res. */
	xfs_filblks_t			stolen = 0;
	xfs_filblks_t			resfactor;

	/*
	 * Steal as many blocks as we can to try and satisfy the worst case
	 * indlen for both new extents.
	 */
	if (ores < nres && avail)
		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
	ores += stolen;

	/* nothing else to do if we've satisfied the new reservation */
	if (ores >= nres)
		return stolen;

	/*
	 * We can't meet the total required reservation for the two extents.
	 * Calculate the percent of the overall shortage between both extents
	 * and apply this percentage to each of the requested indlen values.
	 * This distributes the shortage fairly and reduces the chances that one
	 * of the two extents is left with nothing when extents are repeatedly
	 * split.
	 */
	resfactor = (ores * 100);
	do_div(resfactor, nres);
	len1 *= resfactor;
	do_div(len1, 100);
	len2 *= resfactor;
	do_div(len2, 100);
	ASSERT(len1 + len2 <= ores);
	ASSERT(len1 < *indlen1 && len2 < *indlen2);

	/*
	 * Hand out the remainder to each extent. If one of the two reservations
	 * is zero, we want to make sure that one gets a block first. The loop
	 * below starts with len1, so hand len2 a block right off the bat if it
	 * is zero.
	 */
	ores -= (len1 + len2);
	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) {
			len1++;
			ores--;
		}
		if (!ores)
			break;
		if (len2 < *indlen2) {
			len2++;
			ores--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;
	return stolen;
}
int
xfs_bmap_del_extent_delay(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	new;
	int64_t			da_old, da_new, da_diff = 0;
	xfs_fileoff_t		del_endoff, got_endoff;
	xfs_filblks_t		got_indlen, new_indlen, stolen;
	int			state = xfs_bmap_fork_to_state(whichfork);
	int			error = 0;
	bool			isrt;

	XFS_STATS_INC(mp, xs_del_exlist);

	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;
	da_old = startblockval(got->br_startblock);
	da_new = 0;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);

	if (isrt) {
		uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);

		do_div(rtexts, mp->m_sb.sb_rextsize);
		xfs_mod_frextents(mp, rtexts);
	}

	/*
	 * Update the inode delalloc counter now and wait to update the
	 * sb counters as we might have to borrow some blocks for the
	 * indirect block accounting.
	 */
	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
	if (error)
		return error;
	ip->i_delayed_blks -= del->br_blockcount;

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount = got->br_blockcount - del->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
				got->br_blockcount), da_old);
		got->br_startblock = nullstartblock((int)da_new);
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * Distribute the original indlen reservation across the two new
		 * extents.  Steal blocks from the deleted extent if necessary.
		 * Stealing blocks simply fudges the fdblocks accounting below.
		 * Warn if either of the new indlen reservations is zero as this
		 * can lead to delalloc problems.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;
		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);

		new.br_blockcount = got_endoff - del_endoff;
		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);

		WARN_ON_ONCE(!got_indlen || !new_indlen);
		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
						       del->br_blockcount);

		got->br_startblock = nullstartblock((int)got_indlen);

		new.br_startoff = del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = nullstartblock((int)new_indlen);

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);

		da_new = got_indlen + new_indlen - stolen;
		del->br_blockcount -= stolen;
		break;
	}

	ASSERT(da_old >= da_new);
	da_diff = da_old - da_new;
	if (!isrt)
		da_diff += del->br_blockcount;
	if (da_diff) {
		xfs_mod_fdblocks(mp, da_diff, false);
		xfs_mod_delalloc(mp, -da_diff);
	}
	return error;
}
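/*
 * Worked example of the accounting above (hypothetical numbers): if the
 * middle of a delalloc extent with da_old = 8 is punched out and the two
 * remaining pieces need got_indlen = 3 and new_indlen = 2 with stolen = 1,
 * then da_new = 3 + 2 - 1 = 4.  For a non-realtime file da_diff becomes
 * (8 - 4) plus the (post-steal) deleted block count, all of which is handed
 * back to fdblocks and subtracted from the delalloc counter.
 */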
void
xfs_bmap_del_extent_cow(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_bmbt_irec	*del)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	new;
	xfs_fileoff_t		del_endoff, got_endoff;
	int			state = BMAP_COWFORK;

	XFS_STATS_INC(mp, xs_del_exlist);

	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got->br_startoff + got->br_blockcount;

	ASSERT(del->br_blockcount > 0);
	ASSERT(got->br_startoff <= del->br_startoff);
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got->br_startblock));

	if (got->br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got->br_startoff = del_endoff;
		got->br_blockcount -= del->br_blockcount;
		got->br_startblock = del->br_startblock + del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got->br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, got);
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	int			bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock = 0; /* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			do_fx;	/* free extent at end of routine */
	int			error;	/* error return value */
	int			flags = 0; /* inode logging flags */
	struct xfs_bmbt_irec	got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;
	error = 0;

	/*
	 * If the directory code is running with no block reservation, the
	 * deleted block sits in the middle of its extent, and the resulting
	 * insert of an extent would cause transformation to btree format,
	 * then reject it.  The calling code will then swap blocks around
	 * instead.  We have to do this now, rather than waiting for the
	 * conversion to btree format, since the transaction will be dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	flags = XFS_ILOG_CORE;
	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
		xfs_filblks_t	len;
		xfs_extlen_t	mod;

		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
				  &mod);
		ASSERT(mod == 0);

		if (!(bflags & XFS_BMAPI_REMAP)) {
			xfs_fsblock_t	bno;

			bno = div_u64_rem(del->br_startblock,
					mp->m_sb.sb_rextsize, &mod);
			ASSERT(mod == 0);

			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
		}

		do_fx = 0;
		nblks = len * mp->m_sb.sb_rextsize;
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	} else {
		do_fx = 1;
		nblks = del->br_blockcount;
		qfield = XFS_TRANS_DQ_BCOUNT;
	}

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto done;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto done;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto done;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.
		 *
		 * For directories, -ENOSPC is returned since a directory entry
		 * remove operation must not fail due to low extent count
		 * availability. -ENOSPC will be handled by higher layers of XFS
		 * by letting the corresponding empty Data/Free blocks linger
		 * until a future remove operation. Dabtree blocks would be
		 * swapped with the last block in the leaf space and then the
		 * new last block will be unmapped.
		 *
		 * The above logic also applies to the source directory entry of
		 * a rename operation.
		 */
		error = xfs_iext_count_may_overflow(ip, whichfork, 1);
		if (error) {
			ASSERT(S_ISDIR(VFS_I(ip)->i_mode) &&
				whichfork == XFS_DATA_FORK);
			error = -ENOSPC;
			goto done;
		}

		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		flags |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				goto done;
			/*
			 * If we get ENOSPC back from the btree insert, it
			 * tried a split, and we have a zero block reservation.
			 * Fix up our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					goto done;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					error = -EFSCORRUPTED;
					goto done;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					goto done;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				flags = 0;
				error = -ENOSPC;
				goto done;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		} else
			flags |= xfs_ilog_fext(whichfork);

		ifp->if_nextents++;
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			xfs_refcount_decrease_extent(tp, del);
		} else {
			__xfs_bmap_add_free(tp, del->br_startblock,
					del->br_blockcount, NULL,
					(bflags & XFS_BMAPI_NODISCARD) ||
					del->br_state == XFS_EXT_UNWRITTEN);
		}
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

done:
	*logflagsp = flags;
	return error;
}
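/*
 * Illustration of the four cases above (hypothetical ranges): with got
 * covering file offsets [10, 20), deleting [10, 20) sets both filling flags
 * and removes the record outright; [10, 14) sets only BMAP_LEFT_FILLING and
 * trims the front; [16, 20) sets only BMAP_RIGHT_FILLING and trims the back;
 * [12, 16) sets neither and splits the record in two, which is the only case
 * that can fail with -ENOSPC.
 */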
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *rlen reflects what is left.
 */
int						/* error */
__xfs_bunmapi(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		start,		/* first file offset deleted */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts)		/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		max_len;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	/*
	 * Guesstimate how many blocks we can unmap without running the risk of
	 * blowing out the transaction with a mix of EFIs and reflink
	 * adjustments.
	 */
	if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
	else
		max_len = len;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	end = start + len;

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts) && max_len > 0) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		/* How much can we safely unmap? */
		if (max_len < del.br_blockcount) {
			del.br_startoff += del.br_blockcount - max_len;
			if (!wasdel)
				del.br_startblock += del.br_blockcount - max_len;
			del.br_blockcount = max_len;
		}

		if (!isrt)
			goto delete;

		sum = del.br_startblock + del.br_blockcount;
		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten.  Skip over it.
				 */
				ASSERT(end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
					&got, &del);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
		}

		if (error)
			goto error0;

		max_len -= del.br_blockcount;
		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/* Unmap a range of a file. */
int
xfs_bunmapi(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}
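/*
 * Usage sketch (illustrative only, not lifted from a real caller): higher
 * level code typically drives __xfs_bunmapi() in a loop, finishing deferred
 * ops and rolling the transaction between iterations, until the remaining
 * length reaches zero:
 *
 *	while (unmap_len > 0) {
 *		error = __xfs_bunmapi(tp, ip, start, &unmap_len, flags, 1);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 */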
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
		return false;

	return true;
}
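/*
 * Example (hypothetical extents): with left = [0, 10) at block 100 and
 * got = [15, 19) at block 110, a shift of 5 gives startoff = 10, which is
 * contiguous with left both in-file (0 + 10 == 10) and on-disk
 * (100 + 10 == 110), so the shift may be performed as a merge.
 */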
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}
static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}
int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
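/*
 * Note that this routine collapses one extent per call: *next_fsb acts as a
 * cursor that is advanced on each return, and callers invoke the function
 * repeatedly (rolling the transaction in between) until *done is set.
 */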
/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
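/*
 * The check above catches arithmetic overflow of the on-disk startoff field:
 * BMBT_STARTOFF_MASK is the largest file offset a bmbt record can encode, so
 * if adding the shift wraps the masked result back below got.br_startoff,
 * the shifted extent would not be representable and the insert range must be
 * rejected with -EINVAL.
 */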
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We should
		 * never find mergeable extents in this scenario.  Check anyway
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
 * Split an extent into two extents at split_fsb block such that it is the
 * first block of the current_ext.  @split_fsb is the block where the extent
 * is split.  If split_fsb lies in a hole or at the first block of an extent,
 * just return 0.
 */
STATIC int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
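/*
 * Example (hypothetical extent): splitting got = [10, 30) at block 200 at
 * split_fsb = 18 yields gotblkcnt = 8, trimming got to [10, 18) and
 * inserting new = [18, 30) at block 208 with the same written/unwritten
 * state as the original record.
 */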
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	return  bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/* Record a bmap intent. */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}
/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
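/*
 * Both helpers above only queue work: the intent created by __xfs_bmap_add()
 * sits on the transaction's deferred-ops list until xfs_defer_finish() runs
 * it, at which point xfs_bmap_finish_one() below (reached via the bmap
 * intent log item) performs the actual remap or unmap.
 */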
/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error = 0;

	ASSERT(tp->t_firstblock == NULLFSBLOCK);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
			ip->i_ino, whichfork, startoff, *blockcount, state);

	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
				startblock, 0);
		*blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
				XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtext(mp, irec->br_startblock,
					  irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
					   irec->br_blockcount))
			return __this_address;
	}
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;