2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
31 #include "xfs_inode.h"
32 #include "xfs_btree.h"
33 #include "xfs_trans.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_alloc.h"
38 #include "xfs_bmap_util.h"
39 #include "xfs_bmap_btree.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_errortag.h"
42 #include "xfs_error.h"
43 #include "xfs_quota.h"
44 #include "xfs_trans_space.h"
45 #include "xfs_buf_item.h"
46 #include "xfs_trace.h"
47 #include "xfs_symlink.h"
48 #include "xfs_attr_leaf.h"
49 #include "xfs_filestream.h"
51 #include "xfs_ag_resv.h"
52 #include "xfs_refcount.h"
53 #include "xfs_icache.h"
56 kmem_zone_t *xfs_bmap_free_item_zone;
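/*
 * xfs_bmap_free_item_zone backs the extent-free items queued up by
 * xfs_bmap_add_free() later in this file.  The zone itself is created in
 * the init path elsewhere (xfs_init_zones() in this era of the code); the
 * sketch below is illustrative only and not part of this file.
 */
#if 0
	xfs_bmap_free_item_zone = kmem_zone_init(
			sizeof(struct xfs_extent_free_item),
			"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_zones;		/* hypothetical error label */
#endif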
59 * Miscellaneous helper functions
63 * Compute and fill in the value of the maximum depth of a bmap btree
64 * in this filesystem. Done once, during mount.
67 xfs_bmap_compute_maxlevels(
68 xfs_mount_t *mp, /* file system mount structure */
69 int whichfork) /* data or attr fork */
71 int level; /* btree level */
72 uint maxblocks; /* max blocks at this level */
73 uint maxleafents; /* max leaf entries possible */
74 int maxrootrecs; /* max records in root block */
75 int minleafrecs; /* min records in leaf block */
76 int minnoderecs; /* min records in node block */
77 int sz; /* root block size */
80 * The maximum number of extents in a file, hence the maximum
81 * number of leaf entries, is controlled by the type of di_nextents
82 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
83 * (a signed 16-bit number, xfs_aextnum_t).
85 * Note that we can no longer assume that if we are in ATTR1 that
86 * the fork offset of all the inodes will be
87 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
88 * with ATTR2 and then mounted back with ATTR1, keeping the
89 * di_forkoff's fixed but probably at various positions. Therefore,
90 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
91 * of a minimum size available.
93 if (whichfork == XFS_DATA_FORK) {
94 maxleafents = MAXEXTNUM;
95 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
97 maxleafents = MAXAEXTNUM;
98 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
100 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
101 minleafrecs = mp->m_bmap_dmnr[0];
102 minnoderecs = mp->m_bmap_dmnr[1];
103 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
104 for (level = 1; maxblocks > 1; level++) {
105 if (maxblocks <= maxrootrecs)
108 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
110 mp->m_bm_maxlevels[whichfork] = level;
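/*
 * Worked example of the loop above with made-up (but representative)
 * numbers; the real values come from mp->m_bmap_dmnr[] and
 * xfs_bmdr_maxrecs(), so treat this purely as an illustration.
 */
#if 0
	unsigned int	level, maxblocks;
	unsigned int	maxleafents = 0x7fffffff;	/* MAXEXTNUM */
	unsigned int	minleafrecs = 125;		/* assumed */
	unsigned int	minnoderecs = 125;		/* assumed */
	unsigned int	maxrootrecs = 9;		/* assumed */

	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	/* level ends up as 5, i.e. m_bm_maxlevels[whichfork] == 5 here */
#endif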
113 STATIC int /* error */
114 xfs_bmbt_lookup_eq(
115 struct xfs_btree_cur *cur,
116 struct xfs_bmbt_irec *irec,
117 int *stat) /* success/failure */
119 cur->bc_rec.b = *irec;
120 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
123 STATIC int /* error */
124 xfs_bmbt_lookup_first(
125 struct xfs_btree_cur *cur,
126 int *stat) /* success/failure */
128 cur->bc_rec.b.br_startoff = 0;
129 cur->bc_rec.b.br_startblock = 0;
130 cur->bc_rec.b.br_blockcount = 0;
131 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
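/*
 * Sketch of how the two lookup wrappers above are used further down in
 * this file: position the cursor on an exact incore extent, check that it
 * was found, then rewrite it in place.  "cur" and "irec" are assumed to
 * exist in the caller; error handling is abbreviated.
 */
#if 0
	int	error, stat;

	error = xfs_bmbt_lookup_eq(cur, &irec, &stat);
	if (error)
		goto done;
	if (stat != 1)			/* extent not found: corruption */
		return -EFSCORRUPTED;
	irec.br_state = XFS_EXT_NORM;
	error = xfs_bmbt_update(cur, &irec);
#endif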
135 * Check if the inode needs to be converted to btree format.
137 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
139 return whichfork != XFS_COW_FORK &&
140 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
141 XFS_IFORK_NEXTENTS(ip, whichfork) >
142 XFS_IFORK_MAXEXT(ip, whichfork);
146 * Check if the inode should be converted to extent format.
148 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
150 return whichfork != XFS_COW_FORK &&
151 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
152 XFS_IFORK_NEXTENTS(ip, whichfork) <=
153 XFS_IFORK_MAXEXT(ip, whichfork);
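/*
 * Sketch of how the two predicates above are used by the add/remove paths
 * in this file (transaction plumbing and error handling elided; all of the
 * variables are assumed to exist in the caller):
 */
#if 0
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		/* too many extents for the incore root: grow a bmap btree */
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
				&cur, 0, &logflags, whichfork);
	} else if (xfs_bmap_wants_extents(ip, whichfork)) {
		/* few enough extents again: drop back to extents format */
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
				whichfork);
	}
#endif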
157 * Update the record referred to by cur to the value given by irec
158 * This either works (return 0) or gets an EFSCORRUPTED error.
160 STATIC int
161 xfs_bmbt_update(
162 struct xfs_btree_cur *cur,
163 struct xfs_bmbt_irec *irec)
165 union xfs_btree_rec rec;
167 xfs_bmbt_disk_set_all(&rec.bmbt, irec);
168 return xfs_btree_update(cur, &rec);
172 * Compute the worst-case number of indirect blocks that will be used
173 * for ip's delayed extent of length "len".
176 xfs_bmap_worst_indlen(
177 xfs_inode_t *ip, /* incore inode pointer */
178 xfs_filblks_t len) /* delayed extent length */
180 int level; /* btree level number */
181 int maxrecs; /* maximum record count at this level */
182 xfs_mount_t *mp; /* mount structure */
183 xfs_filblks_t rval; /* return value */
186 maxrecs = mp->m_bmap_dmxr[0];
187 for (level = 0, rval = 0;
188 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
191 do_div(len, maxrecs);
194 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
197 maxrecs = mp->m_bmap_dmxr[1];
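/*
 * Worked example of the calculation above, with an assumed geometry of 125
 * records per block at every level and 5 max levels.  For a 1000-block
 * delayed extent this is what the loop computes:
 */
#if 0
	xfs_filblks_t	len = 1000, rval = 0;
	int		level, maxrecs = 125, maxlevels = 5;	/* assumed */

	for (level = 0; level < maxlevels; level++) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks this level */
		rval += len;
		if (len == 1) {
			/* one block per remaining level */
			rval += maxlevels - level - 1;
			break;
		}
	}
	/* rval == 12: 8 leaf blocks + 1 node block + 3 upper-level blocks */
#endif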
203 * Calculate the default attribute fork offset for newly created inodes.
206 xfs_default_attroffset(
207 struct xfs_inode *ip)
209 struct xfs_mount *mp = ip->i_mount;
212 if (mp->m_sb.sb_inodesize == 256) {
213 offset = XFS_LITINO(mp, ip->i_d.di_version) -
214 XFS_BMDR_SPACE_CALC(MINABTPTRS);
216 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
219 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
224 * Helper routine to reset inode di_forkoff field when switching
225 * attribute fork from local to extent format - we reset it where
226 * possible to make space available for inline data fork extents.
229 xfs_bmap_forkoff_reset(
233 if (whichfork == XFS_ATTR_FORK &&
234 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
235 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
236 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
238 if (dfl_forkoff > ip->i_d.di_forkoff)
239 ip->i_d.di_forkoff = dfl_forkoff;
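/*
 * di_forkoff is stored in units of 8 bytes, hence the ">> 3" above (and in
 * xfs_bmap_add_attrfork() below).  A quick illustration with an assumed
 * byte offset:
 */
#if 0
	uint	attroffset = 1024;		/* bytes, assumed */
	uint	forkoff = attroffset >> 3;	/* stored as 128 */

	ASSERT((uint)(forkoff << 3) == attroffset);
#endif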
244 STATIC struct xfs_buf *
245 xfs_bmap_get_bp(
246 struct xfs_btree_cur *cur,
247 xfs_fsblock_t bno)
249 struct xfs_log_item_desc *lidp;
255 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
256 if (!cur->bc_bufs[i])
258 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
259 return cur->bc_bufs[i];
262 /* Chase down all the log items to see if the bp is there */
263 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
264 struct xfs_buf_log_item *bip;
265 bip = (struct xfs_buf_log_item *)lidp->lid_item;
266 if (bip->bli_item.li_type == XFS_LI_BUF &&
267 XFS_BUF_ADDR(bip->bli_buf) == bno)
274 STATIC void
275 xfs_check_block(
276 struct xfs_btree_block *block,
282 __be64 *pp, *thispa; /* pointer to block address */
283 xfs_bmbt_key_t *prevp, *keyp;
285 ASSERT(be16_to_cpu(block->bb_level) > 0);
288 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
289 dmxr = mp->m_bmap_dmxr[0];
290 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
293 ASSERT(be64_to_cpu(prevp->br_startoff) <
294 be64_to_cpu(keyp->br_startoff));
299 * Compare the block numbers to see if there are dups.
302 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
304 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
306 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
308 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
310 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
311 if (*thispa == *pp) {
312 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
314 (unsigned long long)be64_to_cpu(*thispa));
315 panic("%s: ptrs are equal in node\n",
323 * Check that the extents for the inode ip are in the right order in all
324 * btree leaves. This becomes prohibitively expensive for large extent count
325 * files, so don't bother with inodes that have more than 10,000 extents in
326 * them. The btree record ordering checks will still be done, so for such large
327 * bmapbt constructs that is going to catch most corruptions.
330 xfs_bmap_check_leaf_extents(
331 xfs_btree_cur_t *cur, /* btree cursor or null */
332 xfs_inode_t *ip, /* incore inode pointer */
333 int whichfork) /* data or attr fork */
335 struct xfs_btree_block *block; /* current btree block */
336 xfs_fsblock_t bno; /* block # of "block" */
337 xfs_buf_t *bp; /* buffer for "block" */
338 int error; /* error return value */
339 xfs_extnum_t i=0, j; /* index into the extents list */
340 xfs_ifork_t *ifp; /* fork structure */
341 int level; /* btree level, for checking */
342 xfs_mount_t *mp; /* file system mount structure */
343 __be64 *pp; /* pointer to block address */
344 xfs_bmbt_rec_t *ep; /* pointer to current extent */
345 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
346 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
349 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
353 /* skip large extent count inodes */
354 if (ip->i_d.di_nextents > 10000)
359 ifp = XFS_IFORK_PTR(ip, whichfork);
360 block = ifp->if_broot;
362 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
364 level = be16_to_cpu(block->bb_level);
366 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
367 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
368 bno = be64_to_cpu(*pp);
370 ASSERT(bno != NULLFSBLOCK);
371 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
372 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
375 * Go down the tree until leaf level is reached, following the first
376 * pointer (leftmost) at each level.
378 while (level-- > 0) {
379 /* See if buf is in cur first */
381 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
384 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
390 block = XFS_BUF_TO_BLOCK(bp);
395 * Check this block for basic sanity (increasing keys and
396 * no duplicate blocks).
399 xfs_check_block(block, mp, 0, 0);
400 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
401 bno = be64_to_cpu(*pp);
402 XFS_WANT_CORRUPTED_GOTO(mp,
403 xfs_verify_fsbno(mp, bno), error0);
406 xfs_trans_brelse(NULL, bp);
411 * Here with bp and block set to the leftmost leaf node in the tree.
416 * Loop over all leaf nodes checking that all extents are in the right order.
419 xfs_fsblock_t nextbno;
420 xfs_extnum_t num_recs;
423 num_recs = xfs_btree_get_numrecs(block);
426 * Read-ahead the next leaf block, if any.
429 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
432 * Check all the extents to make sure they are OK.
433 * If we had a previous block, the last entry should
434 * conform with the first entry in this one.
437 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
439 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
440 xfs_bmbt_disk_get_blockcount(&last) <=
441 xfs_bmbt_disk_get_startoff(ep));
443 for (j = 1; j < num_recs; j++) {
444 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
445 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
446 xfs_bmbt_disk_get_blockcount(ep) <=
447 xfs_bmbt_disk_get_startoff(nextp));
455 xfs_trans_brelse(NULL, bp);
459 * If we've reached the end, stop.
461 if (bno == NULLFSBLOCK)
465 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
468 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
474 block = XFS_BUF_TO_BLOCK(bp);
480 xfs_warn(mp, "%s: at error0", __func__);
482 xfs_trans_brelse(NULL, bp);
484 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
486 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
491 * Validate that the bmbt_irecs being returned from bmapi are valid
492 * given the caller's original parameters. Specifically check the
493 * ranges of the returned irecs to ensure that they only extend beyond
494 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
497 xfs_bmap_validate_ret(
501 xfs_bmbt_irec_t *mval,
505 int i; /* index to map values */
507 ASSERT(ret_nmap <= nmap);
509 for (i = 0; i < ret_nmap; i++) {
510 ASSERT(mval[i].br_blockcount > 0);
511 if (!(flags & XFS_BMAPI_ENTIRE)) {
512 ASSERT(mval[i].br_startoff >= bno);
513 ASSERT(mval[i].br_blockcount <= len);
514 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
517 ASSERT(mval[i].br_startoff < bno + len);
518 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
522 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
523 mval[i].br_startoff);
524 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
525 mval[i].br_startblock != HOLESTARTBLOCK);
526 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
527 mval[i].br_state == XFS_EXT_UNWRITTEN);
532 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
533 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
537 * bmap free list manipulation functions
541 * Add the extent to the list of extents to be freed at transaction end.
542 * The list is maintained sorted (by block number).
544 void
545 xfs_bmap_add_free(
546 struct xfs_mount *mp,
547 struct xfs_defer_ops *dfops,
548 xfs_fsblock_t bno,
549 xfs_filblks_t len,
550 struct xfs_owner_info *oinfo)
552 struct xfs_extent_free_item *new; /* new element */
557 ASSERT(bno != NULLFSBLOCK);
559 ASSERT(len <= MAXEXTLEN);
560 ASSERT(!isnullstartblock(bno));
561 agno = XFS_FSB_TO_AGNO(mp, bno);
562 agbno = XFS_FSB_TO_AGBNO(mp, bno);
563 ASSERT(agno < mp->m_sb.sb_agcount);
564 ASSERT(agbno < mp->m_sb.sb_agblocks);
565 ASSERT(len < mp->m_sb.sb_agblocks);
566 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
568 ASSERT(xfs_bmap_free_item_zone != NULL);
570 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
571 new->xefi_startblock = bno;
572 new->xefi_blockcount = (xfs_extlen_t)len;
574 new->xefi_oinfo = *oinfo;
576 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
577 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
578 XFS_FSB_TO_AGBNO(mp, bno), len);
579 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
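/*
 * Sketch of a typical xfs_bmap_add_free() caller (compare the btree root
 * teardown in xfs_bmap_btree_to_extents() below): the block is queued for
 * freeing at transaction commit rather than freed immediately.  Variable
 * names are assumed.
 */
#if 0
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, dfops, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
#endif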
583 * Inode fork format manipulation functions
587 * Transform a btree format file with only one leaf node, where the
588 * extents list will fit in the inode, into an extents format file.
589 * Since the file extents are already in-core, all we have to do is
590 * give up the space for the btree root and pitch the leaf block.
592 STATIC int /* error */
593 xfs_bmap_btree_to_extents(
594 xfs_trans_t *tp, /* transaction pointer */
595 xfs_inode_t *ip, /* incore inode pointer */
596 xfs_btree_cur_t *cur, /* btree cursor */
597 int *logflagsp, /* inode logging flags */
598 int whichfork) /* data or attr fork */
601 struct xfs_btree_block *cblock;/* child btree block */
602 xfs_fsblock_t cbno; /* child block number */
603 xfs_buf_t *cbp; /* child block's buffer */
604 int error; /* error return value */
605 xfs_ifork_t *ifp; /* inode fork data */
606 xfs_mount_t *mp; /* mount point structure */
607 __be64 *pp; /* ptr to block address */
608 struct xfs_btree_block *rblock;/* root btree block */
609 struct xfs_owner_info oinfo;
612 ifp = XFS_IFORK_PTR(ip, whichfork);
613 ASSERT(whichfork != XFS_COW_FORK);
614 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
615 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
616 rblock = ifp->if_broot;
617 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
618 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
619 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
620 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
621 cbno = be64_to_cpu(*pp);
624 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
625 xfs_btree_check_lptr(cur, cbno, 1));
627 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
631 cblock = XFS_BUF_TO_BLOCK(cbp);
632 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
634 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
635 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
636 ip->i_d.di_nblocks--;
637 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
638 xfs_trans_binval(tp, cbp);
639 if (cur->bc_bufs[0] == cbp)
640 cur->bc_bufs[0] = NULL;
641 xfs_iroot_realloc(ip, -1, whichfork);
642 ASSERT(ifp->if_broot == NULL);
643 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
644 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
645 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
650 * Convert an extents-format file into a btree-format file.
651 * The new file will have a root block (in the inode) and a single child block.
653 STATIC int /* error */
654 xfs_bmap_extents_to_btree(
655 xfs_trans_t *tp, /* transaction pointer */
656 xfs_inode_t *ip, /* incore inode pointer */
657 xfs_fsblock_t *firstblock, /* first-block-allocated */
658 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
659 xfs_btree_cur_t **curp, /* cursor returned to caller */
660 int wasdel, /* converting a delayed alloc */
661 int *logflagsp, /* inode logging flags */
662 int whichfork) /* data or attr fork */
664 struct xfs_btree_block *ablock; /* allocated (child) bt block */
665 xfs_buf_t *abp; /* buffer for ablock */
666 xfs_alloc_arg_t args; /* allocation arguments */
667 xfs_bmbt_rec_t *arp; /* child record pointer */
668 struct xfs_btree_block *block; /* btree root block */
669 xfs_btree_cur_t *cur; /* bmap btree cursor */
670 int error; /* error return value */
671 xfs_ifork_t *ifp; /* inode fork pointer */
672 xfs_bmbt_key_t *kp; /* root block key pointer */
673 xfs_mount_t *mp; /* mount structure */
674 xfs_bmbt_ptr_t *pp; /* root block address pointer */
675 struct xfs_iext_cursor icur;
676 struct xfs_bmbt_irec rec;
677 xfs_extnum_t cnt = 0;
680 ASSERT(whichfork != XFS_COW_FORK);
681 ifp = XFS_IFORK_PTR(ip, whichfork);
682 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
685 * Make space in the inode incore.
687 xfs_iroot_realloc(ip, 1, whichfork);
688 ifp->if_flags |= XFS_IFBROOT;
693 block = ifp->if_broot;
694 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
695 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
696 XFS_BTREE_LONG_PTRS);
698 * Need a cursor. Can't allocate until bb_level is filled in.
700 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
701 cur->bc_private.b.firstblock = *firstblock;
702 cur->bc_private.b.dfops = dfops;
703 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
705 * Convert to a btree with two levels, one record in root.
707 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
708 memset(&args, 0, sizeof(args));
711 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
712 args.firstblock = *firstblock;
713 if (*firstblock == NULLFSBLOCK) {
714 args.type = XFS_ALLOCTYPE_START_BNO;
715 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
716 } else if (dfops->dop_low) {
717 args.type = XFS_ALLOCTYPE_START_BNO;
718 args.fsbno = *firstblock;
720 args.type = XFS_ALLOCTYPE_NEAR_BNO;
721 args.fsbno = *firstblock;
723 args.minlen = args.maxlen = args.prod = 1;
724 args.wasdel = wasdel;
726 if ((error = xfs_alloc_vextent(&args))) {
727 xfs_iroot_realloc(ip, -1, whichfork);
728 ASSERT(ifp->if_broot == NULL);
729 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
730 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
734 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
735 xfs_iroot_realloc(ip, -1, whichfork);
736 ASSERT(ifp->if_broot == NULL);
737 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
738 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
742 * Allocation can't fail, the space was reserved.
744 ASSERT(*firstblock == NULLFSBLOCK ||
745 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
746 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
747 cur->bc_private.b.allocated++;
748 ip->i_d.di_nblocks++;
749 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
750 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
752 * Fill in the child block.
754 abp->b_ops = &xfs_bmbt_buf_ops;
755 ablock = XFS_BUF_TO_BLOCK(abp);
756 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
757 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
758 XFS_BTREE_LONG_PTRS);
760 for_each_xfs_iext(ifp, &icur, &rec) {
761 if (isnullstartblock(rec.br_startblock))
763 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
764 xfs_bmbt_disk_set_all(arp, &rec);
767 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
768 xfs_btree_set_numrecs(ablock, cnt);
771 * Fill in the root key and pointer.
773 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
774 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
775 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
776 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
777 be16_to_cpu(block->bb_level)));
778 *pp = cpu_to_be64(args.fsbno);
781 * Do all this logging at the end so that
782 * the root is at the right level.
784 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
785 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
786 ASSERT(*curp == NULL);
788 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
793 * Convert a local file to an extents file.
794 * This code is out of bounds for data forks of regular files,
795 * since the file data needs to get logged so things will stay consistent.
796 * (The bmap-level manipulations are ok, though).
799 xfs_bmap_local_to_extents_empty(
800 struct xfs_inode *ip,
803 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
805 ASSERT(whichfork != XFS_COW_FORK);
806 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
807 ASSERT(ifp->if_bytes == 0);
808 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
810 xfs_bmap_forkoff_reset(ip, whichfork);
811 ifp->if_flags &= ~XFS_IFINLINE;
812 ifp->if_flags |= XFS_IFEXTENTS;
813 ifp->if_u1.if_root = NULL;
815 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
819 STATIC int /* error */
820 xfs_bmap_local_to_extents(
821 xfs_trans_t *tp, /* transaction pointer */
822 xfs_inode_t *ip, /* incore inode pointer */
823 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
824 xfs_extlen_t total, /* total blocks needed by transaction */
825 int *logflagsp, /* inode logging flags */
827 void (*init_fn)(struct xfs_trans *tp,
829 struct xfs_inode *ip,
830 struct xfs_ifork *ifp))
833 int flags; /* logging flags returned */
834 xfs_ifork_t *ifp; /* inode fork pointer */
835 xfs_alloc_arg_t args; /* allocation arguments */
836 xfs_buf_t *bp; /* buffer for extent block */
837 struct xfs_bmbt_irec rec;
838 struct xfs_iext_cursor icur;
841 * We don't want to deal with the case of keeping inode data inline yet.
842 * So sending the data fork of a regular inode is invalid.
844 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
845 ifp = XFS_IFORK_PTR(ip, whichfork);
846 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
848 if (!ifp->if_bytes) {
849 xfs_bmap_local_to_extents_empty(ip, whichfork);
850 flags = XFS_ILOG_CORE;
856 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
857 memset(&args, 0, sizeof(args));
859 args.mp = ip->i_mount;
860 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
861 args.firstblock = *firstblock;
863 * Allocate a block. We know we need only one, since the
864 * file currently fits in an inode.
866 if (*firstblock == NULLFSBLOCK) {
867 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
868 args.type = XFS_ALLOCTYPE_START_BNO;
870 args.fsbno = *firstblock;
871 args.type = XFS_ALLOCTYPE_NEAR_BNO;
874 args.minlen = args.maxlen = args.prod = 1;
875 error = xfs_alloc_vextent(&args);
879 /* Can't fail, the space was reserved. */
880 ASSERT(args.fsbno != NULLFSBLOCK);
881 ASSERT(args.len == 1);
882 *firstblock = args.fsbno;
883 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
886 * Initialize the block, copy the data and log the remote buffer.
888 * The callout is responsible for logging because the remote format
889 * might differ from the local format and thus we don't know how much to
890 * log here. Note that init_fn must also set the buffer log item type
891 * correctly.
893 init_fn(tp, bp, ip, ifp);
895 /* account for the change in fork size */
896 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
897 xfs_bmap_local_to_extents_empty(ip, whichfork);
898 flags |= XFS_ILOG_CORE;
900 ifp->if_u1.if_root = NULL;
904 rec.br_startblock = args.fsbno;
905 rec.br_blockcount = 1;
906 rec.br_state = XFS_EXT_NORM;
907 xfs_iext_first(ifp, &icur);
908 xfs_iext_insert(ip, &icur, &rec, 0);
910 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
911 ip->i_d.di_nblocks = 1;
912 xfs_trans_mod_dquot_byino(tp, ip,
913 XFS_TRANS_DQ_BCOUNT, 1L);
914 flags |= xfs_ilog_fext(whichfork);
922 * Called from xfs_bmap_add_attrfork to handle btree format files.
924 STATIC int /* error */
925 xfs_bmap_add_attrfork_btree(
926 xfs_trans_t *tp, /* transaction pointer */
927 xfs_inode_t *ip, /* incore inode pointer */
928 xfs_fsblock_t *firstblock, /* first block allocated */
929 struct xfs_defer_ops *dfops, /* blocks to free at commit */
930 int *flags) /* inode logging flags */
932 xfs_btree_cur_t *cur; /* btree cursor */
933 int error; /* error return value */
934 xfs_mount_t *mp; /* file system mount struct */
935 int stat; /* newroot status */
938 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
939 *flags |= XFS_ILOG_DBROOT;
941 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
942 cur->bc_private.b.dfops = dfops;
943 cur->bc_private.b.firstblock = *firstblock;
944 error = xfs_bmbt_lookup_first(cur, &stat);
947 /* must be at least one entry */
948 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
949 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
952 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
955 *firstblock = cur->bc_private.b.firstblock;
956 cur->bc_private.b.allocated = 0;
957 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
961 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
966 * Called from xfs_bmap_add_attrfork to handle extents format files.
968 STATIC int /* error */
969 xfs_bmap_add_attrfork_extents(
970 xfs_trans_t *tp, /* transaction pointer */
971 xfs_inode_t *ip, /* incore inode pointer */
972 xfs_fsblock_t *firstblock, /* first block allocated */
973 struct xfs_defer_ops *dfops, /* blocks to free at commit */
974 int *flags) /* inode logging flags */
976 xfs_btree_cur_t *cur; /* bmap btree cursor */
977 int error; /* error return value */
979 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
982 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
983 flags, XFS_DATA_FORK);
985 cur->bc_private.b.allocated = 0;
986 xfs_btree_del_cursor(cur,
987 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
993 * Called from xfs_bmap_add_attrfork to handle local format files. Each
994 * different data fork content type needs a different callout to do the
995 * conversion. Some are basic and only require special block initialisation
996 * callouts for the data formatting, others (directories) are so specialised they
997 * handle everything themselves.
999 * XXX (dgc): investigate whether directory conversion can use the generic
1000 * formatting callout. It should be possible - it's just a very complex
1001 * formatter.
1003 STATIC int /* error */
1004 xfs_bmap_add_attrfork_local(
1005 xfs_trans_t *tp, /* transaction pointer */
1006 xfs_inode_t *ip, /* incore inode pointer */
1007 xfs_fsblock_t *firstblock, /* first block allocated */
1008 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1009 int *flags) /* inode logging flags */
1011 xfs_da_args_t dargs; /* args for dir/attr code */
1013 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1016 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1017 memset(&dargs, 0, sizeof(dargs));
1018 dargs.geo = ip->i_mount->m_dir_geo;
1020 dargs.firstblock = firstblock;
1021 dargs.dfops = dfops;
1022 dargs.total = dargs.geo->fsbcount;
1023 dargs.whichfork = XFS_DATA_FORK;
1025 return xfs_dir2_sf_to_block(&dargs);
1028 if (S_ISLNK(VFS_I(ip)->i_mode))
1029 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1030 flags, XFS_DATA_FORK,
1031 xfs_symlink_local_to_remote);
1033 /* should only be called for types that support local format data */
1035 return -EFSCORRUPTED;
1039 * Convert inode from non-attributed to attributed.
1040 * Must not be in a transaction, ip must not be locked.
1042 int /* error code */
1043 xfs_bmap_add_attrfork(
1044 xfs_inode_t *ip, /* incore inode pointer */
1045 int size, /* space new attribute needs */
1046 int rsvd) /* xact may use reserved blks */
1048 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1049 struct xfs_defer_ops dfops; /* freed extent records */
1050 xfs_mount_t *mp; /* mount structure */
1051 xfs_trans_t *tp; /* transaction pointer */
1052 int blks; /* space reservation */
1053 int version = 1; /* superblock attr version */
1054 int logflags; /* logging flags */
1055 int error; /* error return value */
1057 ASSERT(XFS_IFORK_Q(ip) == 0);
1060 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1062 blks = XFS_ADDAFORK_SPACE_RES(mp);
1064 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1065 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1069 xfs_ilock(ip, XFS_ILOCK_EXCL);
1070 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1071 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1072 XFS_QMOPT_RES_REGBLKS);
1075 if (XFS_IFORK_Q(ip))
1077 if (ip->i_d.di_anextents != 0) {
1078 error = -EFSCORRUPTED;
1081 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1083 * For inodes coming from pre-6.2 filesystems.
1085 ASSERT(ip->i_d.di_aformat == 0);
1086 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1089 xfs_trans_ijoin(tp, ip, 0);
1090 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1092 switch (ip->i_d.di_format) {
1093 case XFS_DINODE_FMT_DEV:
1094 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1096 case XFS_DINODE_FMT_LOCAL:
1097 case XFS_DINODE_FMT_EXTENTS:
1098 case XFS_DINODE_FMT_BTREE:
1099 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1100 if (!ip->i_d.di_forkoff)
1101 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1102 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1111 ASSERT(ip->i_afp == NULL);
1112 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1113 ip->i_afp->if_flags = XFS_IFEXTENTS;
1115 xfs_defer_init(&dfops, &firstblock);
1116 switch (ip->i_d.di_format) {
1117 case XFS_DINODE_FMT_LOCAL:
1118 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1121 case XFS_DINODE_FMT_EXTENTS:
1122 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1125 case XFS_DINODE_FMT_BTREE:
1126 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1134 xfs_trans_log_inode(tp, ip, logflags);
1137 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1138 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1139 bool log_sb = false;
1141 spin_lock(&mp->m_sb_lock);
1142 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1143 xfs_sb_version_addattr(&mp->m_sb);
1146 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1147 xfs_sb_version_addattr2(&mp->m_sb);
1150 spin_unlock(&mp->m_sb_lock);
1155 error = xfs_defer_finish(&tp, &dfops);
1158 error = xfs_trans_commit(tp);
1159 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1163 xfs_defer_cancel(&dfops);
1165 xfs_trans_cancel(tp);
1166 xfs_iunlock(ip, XFS_ILOCK_EXCL);
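/*
 * Sketch of the usual caller of xfs_bmap_add_attrfork(): the attr-set path
 * adds the attribute fork on demand before the first attribute is written.
 * The exact call site and size calculation live in the attr code; this is
 * illustrative only.
 */
#if 0
	if (XFS_IFORK_Q(ip) == 0) {
		int sf_size = sizeof(xfs_attr_sf_hdr_t) +
			      XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen);

		error = xfs_bmap_add_attrfork(ip, sf_size, rsvd);
		if (error)
			return error;
	}
#endif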
1171 * Internal and external extent tree search functions.
1175 * Read in extents from a btree-format inode.
1177 int
1178 xfs_iread_extents(
1179 struct xfs_trans *tp,
1180 struct xfs_inode *ip,
1183 struct xfs_mount *mp = ip->i_mount;
1184 int state = xfs_bmap_fork_to_state(whichfork);
1185 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1186 xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1187 struct xfs_btree_block *block = ifp->if_broot;
1188 struct xfs_iext_cursor icur;
1189 struct xfs_bmbt_irec new;
1197 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1199 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1200 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
1201 return -EFSCORRUPTED;
1205 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1207 level = be16_to_cpu(block->bb_level);
1209 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1210 bno = be64_to_cpu(*pp);
1213 * Go down the tree until leaf level is reached, following the first
1214 * pointer (leftmost) at each level.
1216 while (level-- > 0) {
1217 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1218 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1221 block = XFS_BUF_TO_BLOCK(bp);
1224 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1225 bno = be64_to_cpu(*pp);
1226 XFS_WANT_CORRUPTED_GOTO(mp,
1227 xfs_verify_fsbno(mp, bno), out_brelse);
1228 xfs_trans_brelse(tp, bp);
1232 * Here with bp and block set to the leftmost leaf node in the tree.
1235 xfs_iext_first(ifp, &icur);
1238 * Loop over all leaf nodes. Copy information to the extent records.
1241 xfs_bmbt_rec_t *frp;
1242 xfs_fsblock_t nextbno;
1243 xfs_extnum_t num_recs;
1245 num_recs = xfs_btree_get_numrecs(block);
1246 if (unlikely(i + num_recs > nextents)) {
1247 ASSERT(i + num_recs <= nextents);
1248 xfs_warn(ip->i_mount,
1249 "corrupt dinode %Lu, (btree extents).",
1250 (unsigned long long) ip->i_ino);
1251 xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1252 __func__, block, sizeof(*block),
1254 error = -EFSCORRUPTED;
1258 * Read-ahead the next leaf block, if any.
1260 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1261 if (nextbno != NULLFSBLOCK)
1262 xfs_btree_reada_bufl(mp, nextbno, 1,
1265 * Copy records into the extent records.
1267 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1268 for (j = 0; j < num_recs; j++, frp++, i++) {
1271 xfs_bmbt_disk_get_all(frp, &new);
1272 fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1274 error = -EFSCORRUPTED;
1275 xfs_inode_verifier_error(ip, error,
1276 "xfs_iread_extents(2)",
1277 frp, sizeof(*frp), fa);
1280 xfs_iext_insert(ip, &icur, &new, state);
1281 trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
1282 xfs_iext_next(ifp, &icur);
1284 xfs_trans_brelse(tp, bp);
1287 * If we've reached the end, stop.
1289 if (bno == NULLFSBLOCK)
1291 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1292 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1295 block = XFS_BUF_TO_BLOCK(bp);
1298 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
1299 error = -EFSCORRUPTED;
1302 ASSERT(i == xfs_iext_count(ifp));
1304 ifp->if_flags |= XFS_IFEXTENTS;
1308 xfs_trans_brelse(tp, bp);
1310 xfs_iext_destroy(ifp);
1315 * Returns the relative block number of the first unused block(s) in the given
1316 * fork with at least "len" logically contiguous blocks free. This is the
1317 * lowest-address hole if the fork has holes, else the first block past the end
1318 * of fork. Return 0 if the fork is currently local (in-inode).
1321 xfs_bmap_first_unused(
1322 struct xfs_trans *tp, /* transaction pointer */
1323 struct xfs_inode *ip, /* incore inode */
1324 xfs_extlen_t len, /* size of hole to find */
1325 xfs_fileoff_t *first_unused, /* unused block */
1326 int whichfork) /* data or attr fork */
1328 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1329 struct xfs_bmbt_irec got;
1330 struct xfs_iext_cursor icur;
1331 xfs_fileoff_t lastaddr = 0;
1332 xfs_fileoff_t lowest, max;
1335 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1336 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1337 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1339 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1344 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1345 error = xfs_iread_extents(tp, ip, whichfork);
1350 lowest = max = *first_unused;
1351 for_each_xfs_iext(ifp, &icur, &got) {
1353 * See if the hole before this extent will work.
1355 if (got.br_startoff >= lowest + len &&
1356 got.br_startoff - max >= len)
1358 lastaddr = got.br_startoff + got.br_blockcount;
1359 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1362 *first_unused = max;
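/*
 * Worked example for the search above, with an assumed fork layout: given
 * mappings at file offsets [0, 10) and [15, 25) and len = 5, the first
 * extent only advances "max" to 10; the second extent starts at 15, which
 * is >= lowest + len and leaves a 5-block hole after "max", so the loop
 * breaks and *first_unused is set to 10.
 */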
1367 * Returns the file-relative block number of the last block - 1 before
1368 * last_block (input value) in the file.
1369 * This is not based on i_size, it is based on the extent records.
1370 * Returns 0 for local files, as they do not have extent records.
1373 xfs_bmap_last_before(
1374 struct xfs_trans *tp, /* transaction pointer */
1375 struct xfs_inode *ip, /* incore inode */
1376 xfs_fileoff_t *last_block, /* last block */
1377 int whichfork) /* data or attr fork */
1379 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1380 struct xfs_bmbt_irec got;
1381 struct xfs_iext_cursor icur;
1384 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1385 case XFS_DINODE_FMT_LOCAL:
1388 case XFS_DINODE_FMT_BTREE:
1389 case XFS_DINODE_FMT_EXTENTS:
1395 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1396 error = xfs_iread_extents(tp, ip, whichfork);
1401 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1407 xfs_bmap_last_extent(
1408 struct xfs_trans *tp,
1409 struct xfs_inode *ip,
1411 struct xfs_bmbt_irec *rec,
1414 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1415 struct xfs_iext_cursor icur;
1418 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1419 error = xfs_iread_extents(tp, ip, whichfork);
1424 xfs_iext_last(ifp, &icur);
1425 if (!xfs_iext_get_extent(ifp, &icur, rec))
1433 * Check the last inode extent to determine whether this allocation will result
1434 * in blocks being allocated at the end of the file. When we allocate new data
1435 * blocks at the end of the file which do not start at the previous data block,
1436 * we will try to align the new blocks at stripe unit boundaries.
1438 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1439 * at, or past the EOF.
1441 STATIC int
1442 xfs_bmap_isaeof(
1443 struct xfs_bmalloca *bma,
1446 struct xfs_bmbt_irec rec;
1451 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1462 * Check if we are allocating at or past the last extent, or at least into
1463 * the last delayed allocated extent.
1465 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1466 (bma->offset >= rec.br_startoff &&
1467 isnullstartblock(rec.br_startblock));
1472 * Returns the file-relative block number of the first block past eof in
1473 * the file. This is not based on i_size, it is based on the extent records.
1474 * Returns 0 for local files, as they do not have extent records.
1477 xfs_bmap_last_offset(
1478 struct xfs_inode *ip,
1479 xfs_fileoff_t *last_block,
1482 struct xfs_bmbt_irec rec;
1488 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1491 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1492 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1495 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1496 if (error || is_empty)
1499 *last_block = rec.br_startoff + rec.br_blockcount;
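/*
 * Sketch of a typical consumer: the directory code uses this to decide
 * whether a directory's data fits in a single dir block.  Illustrative
 * only; names are assumed.
 */
#if 0
	xfs_fileoff_t	last;

	error = xfs_bmap_last_offset(dp, &last, XFS_DATA_FORK);
	if (error)
		return error;
	isblock = (last == args->geo->fsbcount);	/* one dir block? */
#endif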
1504 * Returns whether the selected fork of the inode has exactly one
1505 * block or not. For the data fork we check this matches di_size,
1506 * implying the file's range is 0..bsize-1.
1508 int /* 1=>1 block, 0=>otherwise */
1509 xfs_bmap_one_block(
1510 xfs_inode_t *ip, /* incore inode */
1511 int whichfork) /* data or attr fork */
1513 xfs_ifork_t *ifp; /* inode fork pointer */
1514 int rval; /* return value */
1515 xfs_bmbt_irec_t s; /* internal version of extent */
1516 struct xfs_iext_cursor icur;
1519 if (whichfork == XFS_DATA_FORK)
1520 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1522 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1524 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1526 ifp = XFS_IFORK_PTR(ip, whichfork);
1527 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1528 xfs_iext_first(ifp, &icur);
1529 xfs_iext_get_extent(ifp, &icur, &s);
1530 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1531 if (rval && whichfork == XFS_DATA_FORK)
1532 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1537 * Extent tree manipulation functions used during allocation.
1541 * Convert a delayed allocation to a real allocation.
1543 STATIC int /* error */
1544 xfs_bmap_add_extent_delay_real(
1545 struct xfs_bmalloca *bma,
1548 struct xfs_bmbt_irec *new = &bma->got;
1549 int error; /* error return value */
1550 int i; /* temp state */
1551 xfs_ifork_t *ifp; /* inode fork pointer */
1552 xfs_fileoff_t new_endoff; /* end offset of new entry */
1553 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1554 /* left is 0, right is 1, prev is 2 */
1555 int rval=0; /* return value (logging flags) */
1556 int state = xfs_bmap_fork_to_state(whichfork);
1557 xfs_filblks_t da_new; /* new count del alloc blocks used */
1558 xfs_filblks_t da_old; /* old count del alloc blocks used */
1559 xfs_filblks_t temp=0; /* value for da_new calculations */
1560 int tmp_rval; /* partial logging flags */
1561 struct xfs_mount *mp;
1562 xfs_extnum_t *nextents;
1563 struct xfs_bmbt_irec old;
1565 mp = bma->ip->i_mount;
1566 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1567 ASSERT(whichfork != XFS_ATTR_FORK);
1568 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1569 &bma->ip->i_d.di_nextents);
1571 ASSERT(!isnullstartblock(new->br_startblock));
1573 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1575 XFS_STATS_INC(mp, xs_add_exlist);
1582 * Set up a bunch of variables to make the tests simpler.
1584 xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1585 new_endoff = new->br_startoff + new->br_blockcount;
1586 ASSERT(isnullstartblock(PREV.br_startblock));
1587 ASSERT(PREV.br_startoff <= new->br_startoff);
1588 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1590 da_old = startblockval(PREV.br_startblock);
1594 * Set flags determining what part of the previous delayed allocation
1595 * extent is being replaced by a real allocation.
1597 if (PREV.br_startoff == new->br_startoff)
1598 state |= BMAP_LEFT_FILLING;
1599 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1600 state |= BMAP_RIGHT_FILLING;
1603 * Check and set flags if this segment has a left neighbor.
1604 * Don't set contiguous if the combined extent would be too large.
1606 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1607 state |= BMAP_LEFT_VALID;
1608 if (isnullstartblock(LEFT.br_startblock))
1609 state |= BMAP_LEFT_DELAY;
1612 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1613 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1614 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1615 LEFT.br_state == new->br_state &&
1616 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1617 state |= BMAP_LEFT_CONTIG;
1620 * Check and set flags if this segment has a right neighbor.
1621 * Don't set contiguous if the combined extent would be too large.
1622 * Also check for all-three-contiguous being too large.
1624 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1625 state |= BMAP_RIGHT_VALID;
1626 if (isnullstartblock(RIGHT.br_startblock))
1627 state |= BMAP_RIGHT_DELAY;
1630 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1631 new_endoff == RIGHT.br_startoff &&
1632 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1633 new->br_state == RIGHT.br_state &&
1634 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1635 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1636 BMAP_RIGHT_FILLING)) !=
1637 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1638 BMAP_RIGHT_FILLING) ||
1639 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1641 state |= BMAP_RIGHT_CONTIG;
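	/*
	 * Concrete example of the state computed above (assumed numbers):
	 * PREV is a delayed extent covering offsets [10, 30), new is a real
	 * allocation for [10, 20), and LEFT is a real extent [0, 10) whose
	 * end abuts new in both file offset and block number.  That yields
	 * BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG (but not BMAP_RIGHT_FILLING,
	 * since 10 blocks of PREV remain delayed), which the switch below
	 * handles by growing LEFT and shrinking PREV.
	 */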
1645 * Switch out based on the FILLING and CONTIG state bits.
1647 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1648 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1649 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1650 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1652 * Filling in all of a previously delayed allocation extent.
1653 * The left and right neighbors are both contiguous with new.
1655 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1657 xfs_iext_remove(bma->ip, &bma->icur, state);
1658 xfs_iext_remove(bma->ip, &bma->icur, state);
1659 xfs_iext_prev(ifp, &bma->icur);
1660 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1663 if (bma->cur == NULL)
1664 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1666 rval = XFS_ILOG_CORE;
1667 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1670 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1671 error = xfs_btree_delete(bma->cur, &i);
1674 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1675 error = xfs_btree_decrement(bma->cur, 0, &i);
1678 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1679 error = xfs_bmbt_update(bma->cur, &LEFT);
1685 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1687 * Filling in all of a previously delayed allocation extent.
1688 * The left neighbor is contiguous, the right is not.
1691 LEFT.br_blockcount += PREV.br_blockcount;
1693 xfs_iext_remove(bma->ip, &bma->icur, state);
1694 xfs_iext_prev(ifp, &bma->icur);
1695 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1697 if (bma->cur == NULL)
1698 rval = XFS_ILOG_DEXT;
1701 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1704 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1705 error = xfs_bmbt_update(bma->cur, &LEFT);
1711 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1713 * Filling in all of a previously delayed allocation extent.
1714 * The right neighbor is contiguous, the left is not.
1716 PREV.br_startblock = new->br_startblock;
1717 PREV.br_blockcount += RIGHT.br_blockcount;
1719 xfs_iext_next(ifp, &bma->icur);
1720 xfs_iext_remove(bma->ip, &bma->icur, state);
1721 xfs_iext_prev(ifp, &bma->icur);
1722 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1724 if (bma->cur == NULL)
1725 rval = XFS_ILOG_DEXT;
1728 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1731 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1732 error = xfs_bmbt_update(bma->cur, &PREV);
1738 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1740 * Filling in all of a previously delayed allocation extent.
1741 * Neither the left nor right neighbors are contiguous with
1744 PREV.br_startblock = new->br_startblock;
1745 PREV.br_state = new->br_state;
1746 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1749 if (bma->cur == NULL)
1750 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1752 rval = XFS_ILOG_CORE;
1753 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1756 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1757 error = xfs_btree_insert(bma->cur, &i);
1760 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1764 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1766 * Filling in the first part of a previous delayed allocation.
1767 * The left neighbor is contiguous.
1770 temp = PREV.br_blockcount - new->br_blockcount;
1771 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1772 startblockval(PREV.br_startblock));
1774 LEFT.br_blockcount += new->br_blockcount;
1776 PREV.br_blockcount = temp;
1777 PREV.br_startoff += new->br_blockcount;
1778 PREV.br_startblock = nullstartblock(da_new);
1780 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1781 xfs_iext_prev(ifp, &bma->icur);
1782 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1784 if (bma->cur == NULL)
1785 rval = XFS_ILOG_DEXT;
1788 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1791 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1792 error = xfs_bmbt_update(bma->cur, &LEFT);
1798 case BMAP_LEFT_FILLING:
1800 * Filling in the first part of a previous delayed allocation.
1801 * The left neighbor is not contiguous.
1803 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1805 if (bma->cur == NULL)
1806 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1808 rval = XFS_ILOG_CORE;
1809 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1812 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1813 error = xfs_btree_insert(bma->cur, &i);
1816 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1819 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1820 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1821 bma->firstblock, bma->dfops,
1822 &bma->cur, 1, &tmp_rval, whichfork);
1828 temp = PREV.br_blockcount - new->br_blockcount;
1829 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1830 startblockval(PREV.br_startblock) -
1831 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1833 PREV.br_startoff = new_endoff;
1834 PREV.br_blockcount = temp;
1835 PREV.br_startblock = nullstartblock(da_new);
1836 xfs_iext_next(ifp, &bma->icur);
1837 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1838 xfs_iext_prev(ifp, &bma->icur);
1841 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1843 * Filling in the last part of a previous delayed allocation.
1844 * The right neighbor is contiguous with the new allocation.
1847 RIGHT.br_startoff = new->br_startoff;
1848 RIGHT.br_startblock = new->br_startblock;
1849 RIGHT.br_blockcount += new->br_blockcount;
1851 if (bma->cur == NULL)
1852 rval = XFS_ILOG_DEXT;
1855 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1858 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1859 error = xfs_bmbt_update(bma->cur, &RIGHT);
1864 temp = PREV.br_blockcount - new->br_blockcount;
1865 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1866 startblockval(PREV.br_startblock));
1868 PREV.br_blockcount = temp;
1869 PREV.br_startblock = nullstartblock(da_new);
1871 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1872 xfs_iext_next(ifp, &bma->icur);
1873 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1876 case BMAP_RIGHT_FILLING:
1878 * Filling in the last part of a previous delayed allocation.
1879 * The right neighbor is not contiguous.
1881 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1883 if (bma->cur == NULL)
1884 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1886 rval = XFS_ILOG_CORE;
1887 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1890 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1891 error = xfs_btree_insert(bma->cur, &i);
1894 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1897 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1898 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1899 bma->firstblock, bma->dfops, &bma->cur, 1,
1900 &tmp_rval, whichfork);
1906 temp = PREV.br_blockcount - new->br_blockcount;
1907 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1908 startblockval(PREV.br_startblock) -
1909 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1911 PREV.br_startblock = nullstartblock(da_new);
1912 PREV.br_blockcount = temp;
1913 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1914 xfs_iext_next(ifp, &bma->icur);
1919 * Filling in the middle part of a previous delayed allocation.
1920 * Contiguity is impossible here.
1921 * This case is avoided almost all the time.
1923 * We start with a delayed allocation:
1925 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1928 * and we are allocating:
1929 * +rrrrrrrrrrrrrrrrr+
1932 * and we set it up for insertion as:
1933 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1935 * PREV @ idx LEFT RIGHT
1936 * inserted at idx + 1
1940 /* LEFT is the new middle */
1943 /* RIGHT is the new right */
1944 RIGHT.br_state = PREV.br_state;
1945 RIGHT.br_startoff = new_endoff;
1946 RIGHT.br_blockcount =
1947 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1948 RIGHT.br_startblock =
1949 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1950 RIGHT.br_blockcount));
1953 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1954 PREV.br_startblock =
1955 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1956 PREV.br_blockcount));
1957 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1959 xfs_iext_next(ifp, &bma->icur);
1960 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1961 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1964 if (bma->cur == NULL)
1965 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1967 rval = XFS_ILOG_CORE;
1968 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1971 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1972 error = xfs_btree_insert(bma->cur, &i);
1975 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1978 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1979 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1980 bma->firstblock, bma->dfops, &bma->cur,
1981 1, &tmp_rval, whichfork);
1987 da_new = startblockval(PREV.br_startblock) +
1988 startblockval(RIGHT.br_startblock);
1991 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1992 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1993 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1994 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1995 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1996 case BMAP_LEFT_CONTIG:
1997 case BMAP_RIGHT_CONTIG:
1999 * These cases are all impossible.
2004 /* add reverse mapping */
2005 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2009 /* convert to a btree if necessary */
2010 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2011 int tmp_logflags; /* partial log flag return val */
2013 ASSERT(bma->cur == NULL);
2014 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2015 bma->firstblock, bma->dfops, &bma->cur,
2016 da_old > 0, &tmp_logflags, whichfork);
2017 bma->logflags |= tmp_logflags;
2023 da_new += bma->cur->bc_private.b.allocated;
2024 bma->cur->bc_private.b.allocated = 0;
2027 /* adjust for changes in reserved delayed indirect blocks */
2028 if (da_new != da_old) {
2029 ASSERT(state == 0 || da_new < da_old);
2030 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
2034 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2036 if (whichfork != XFS_COW_FORK)
2037 bma->logflags |= rval;
2045 * Convert an unwritten allocation to a real allocation or vice versa.
2047 STATIC int /* error */
2048 xfs_bmap_add_extent_unwritten_real(
2049 struct xfs_trans *tp,
2050 xfs_inode_t *ip, /* incore inode pointer */
2052 struct xfs_iext_cursor *icur,
2053 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2054 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2055 xfs_fsblock_t *first, /* pointer to firstblock variable */
2056 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2057 int *logflagsp) /* inode logging flags */
2059 xfs_btree_cur_t *cur; /* btree cursor */
2060 int error; /* error return value */
2061 int i; /* temp state */
2062 xfs_ifork_t *ifp; /* inode fork pointer */
2063 xfs_fileoff_t new_endoff; /* end offset of new entry */
2064 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2065 /* left is 0, right is 1, prev is 2 */
2066 int rval=0; /* return value (logging flags) */
2067 int state = xfs_bmap_fork_to_state(whichfork);
2068 struct xfs_mount *mp = ip->i_mount;
2069 struct xfs_bmbt_irec old;
2074 ifp = XFS_IFORK_PTR(ip, whichfork);
2076 ASSERT(!isnullstartblock(new->br_startblock));
2078 XFS_STATS_INC(mp, xs_add_exlist);
2085 * Set up a bunch of variables to make the tests simpler.
2088 xfs_iext_get_extent(ifp, icur, &PREV);
2089 ASSERT(new->br_state != PREV.br_state);
2090 new_endoff = new->br_startoff + new->br_blockcount;
2091 ASSERT(PREV.br_startoff <= new->br_startoff);
2092 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2095 * Set flags determining what part of the previous oldext allocation
2096 * extent is being replaced by a newext allocation.
2098 if (PREV.br_startoff == new->br_startoff)
2099 state |= BMAP_LEFT_FILLING;
2100 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2101 state |= BMAP_RIGHT_FILLING;
2104 * Check and set flags if this segment has a left neighbor.
2105 * Don't set contiguous if the combined extent would be too large.
2107 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2108 state |= BMAP_LEFT_VALID;
2109 if (isnullstartblock(LEFT.br_startblock))
2110 state |= BMAP_LEFT_DELAY;
2113 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2114 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2115 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2116 LEFT.br_state == new->br_state &&
2117 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2118 state |= BMAP_LEFT_CONTIG;
2121 * Check and set flags if this segment has a right neighbor.
2122 * Don't set contiguous if the combined extent would be too large.
2123 * Also check for all-three-contiguous being too large.
2125 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2126 state |= BMAP_RIGHT_VALID;
2127 if (isnullstartblock(RIGHT.br_startblock))
2128 state |= BMAP_RIGHT_DELAY;
2131 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2132 new_endoff == RIGHT.br_startoff &&
2133 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2134 new->br_state == RIGHT.br_state &&
2135 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2136 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2137 BMAP_RIGHT_FILLING)) !=
2138 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2139 BMAP_RIGHT_FILLING) ||
2140 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2142 state |= BMAP_RIGHT_CONTIG;
2145 * Switch out based on the FILLING and CONTIG state bits.
2147 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2148 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2149 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2150 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2152 * Setting all of a previous oldext extent to newext.
2153 * The left and right neighbors are both contiguous with new.
2155 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2157 xfs_iext_remove(ip, icur, state);
2158 xfs_iext_remove(ip, icur, state);
2159 xfs_iext_prev(ifp, icur);
2160 xfs_iext_update_extent(ip, state, icur, &LEFT);
2161 XFS_IFORK_NEXT_SET(ip, whichfork,
2162 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2164 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2166 rval = XFS_ILOG_CORE;
2167 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2170 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2171 if ((error = xfs_btree_delete(cur, &i)))
2173 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2174 if ((error = xfs_btree_decrement(cur, 0, &i)))
2176 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2177 if ((error = xfs_btree_delete(cur, &i)))
2179 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2180 if ((error = xfs_btree_decrement(cur, 0, &i)))
2182 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2183 error = xfs_bmbt_update(cur, &LEFT);
2189 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2191 * Setting all of a previous oldext extent to newext.
2192 * The left neighbor is contiguous, the right is not.
2194 LEFT.br_blockcount += PREV.br_blockcount;
2196 xfs_iext_remove(ip, icur, state);
2197 xfs_iext_prev(ifp, icur);
2198 xfs_iext_update_extent(ip, state, icur, &LEFT);
2199 XFS_IFORK_NEXT_SET(ip, whichfork,
2200 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2202 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2204 rval = XFS_ILOG_CORE;
2205 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2208 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2209 if ((error = xfs_btree_delete(cur, &i)))
2211 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2212 if ((error = xfs_btree_decrement(cur, 0, &i)))
2214 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2215 error = xfs_bmbt_update(cur, &LEFT);
2221 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2223 * Setting all of a previous oldext extent to newext.
2224 * The right neighbor is contiguous, the left is not.
2226 PREV.br_blockcount += RIGHT.br_blockcount;
2227 PREV.br_state = new->br_state;
2229 xfs_iext_next(ifp, icur);
2230 xfs_iext_remove(ip, icur, state);
2231 xfs_iext_prev(ifp, icur);
2232 xfs_iext_update_extent(ip, state, icur, &PREV);
2234 XFS_IFORK_NEXT_SET(ip, whichfork,
2235 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2237 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2239 rval = XFS_ILOG_CORE;
2240 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2243 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2244 if ((error = xfs_btree_delete(cur, &i)))
2246 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2247 if ((error = xfs_btree_decrement(cur, 0, &i)))
2249 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2250 error = xfs_bmbt_update(cur, &PREV);
2256 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2258 * Setting all of a previous oldext extent to newext.
2259 * Neither the left nor right neighbors are contiguous with the new extent.
2262 PREV.br_state = new->br_state;
2263 xfs_iext_update_extent(ip, state, icur, &PREV);
2266 rval = XFS_ILOG_DEXT;
2269 error = xfs_bmbt_lookup_eq(cur, new, &i);
2272 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2273 error = xfs_bmbt_update(cur, &PREV);
2279 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2281 * Setting the first part of a previous oldext extent to newext.
2282 * The left neighbor is contiguous.
2284 LEFT.br_blockcount += new->br_blockcount;
2287 PREV.br_startoff += new->br_blockcount;
2288 PREV.br_startblock += new->br_blockcount;
2289 PREV.br_blockcount -= new->br_blockcount;
2291 xfs_iext_update_extent(ip, state, icur, &PREV);
2292 xfs_iext_prev(ifp, icur);
2293 xfs_iext_update_extent(ip, state, icur, &LEFT);
2296 rval = XFS_ILOG_DEXT;
2299 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2302 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2303 error = xfs_bmbt_update(cur, &PREV);
2306 error = xfs_btree_decrement(cur, 0, &i);
2309 error = xfs_bmbt_update(cur, &LEFT);
2315 case BMAP_LEFT_FILLING:
2317 * Setting the first part of a previous oldext extent to newext.
2318 * The left neighbor is not contiguous.
2321 PREV.br_startoff += new->br_blockcount;
2322 PREV.br_startblock += new->br_blockcount;
2323 PREV.br_blockcount -= new->br_blockcount;
2325 xfs_iext_update_extent(ip, state, icur, &PREV);
2326 xfs_iext_insert(ip, icur, new, state);
2327 XFS_IFORK_NEXT_SET(ip, whichfork,
2328 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2330 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2332 rval = XFS_ILOG_CORE;
2333 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2336 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2337 error = xfs_bmbt_update(cur, &PREV);
2340 cur->bc_rec.b = *new;
2341 if ((error = xfs_btree_insert(cur, &i)))
2343 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2347 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2349 * Setting the last part of a previous oldext extent to newext.
2350 * The right neighbor is contiguous with the new allocation.
2353 PREV.br_blockcount -= new->br_blockcount;
2355 RIGHT.br_startoff = new->br_startoff;
2356 RIGHT.br_startblock = new->br_startblock;
2357 RIGHT.br_blockcount += new->br_blockcount;
2359 xfs_iext_update_extent(ip, state, icur, &PREV);
2360 xfs_iext_next(ifp, icur);
2361 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2364 rval = XFS_ILOG_DEXT;
2367 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2370 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2371 error = xfs_bmbt_update(cur, &PREV);
2374 error = xfs_btree_increment(cur, 0, &i);
2377 error = xfs_bmbt_update(cur, &RIGHT);
2383 case BMAP_RIGHT_FILLING:
2385 * Setting the last part of a previous oldext extent to newext.
2386 * The right neighbor is not contiguous.
2389 PREV.br_blockcount -= new->br_blockcount;
2391 xfs_iext_update_extent(ip, state, icur, &PREV);
2392 xfs_iext_next(ifp, icur);
2393 xfs_iext_insert(ip, icur, new, state);
2395 XFS_IFORK_NEXT_SET(ip, whichfork,
2396 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2398 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2400 rval = XFS_ILOG_CORE;
2401 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2404 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2405 error = xfs_bmbt_update(cur, &PREV);
2408 error = xfs_bmbt_lookup_eq(cur, new, &i);
2411 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2412 if ((error = xfs_btree_insert(cur, &i)))
2414 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2420 * Setting the middle part of a previous oldext extent to
2421 * newext. Contiguity is impossible here.
2422 * One extent becomes three extents.
2425 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2428 r[1].br_startoff = new_endoff;
2429 r[1].br_blockcount =
2430 old.br_startoff + old.br_blockcount - new_endoff;
2431 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2432 r[1].br_state = PREV.br_state;
2434 xfs_iext_update_extent(ip, state, icur, &PREV);
2435 xfs_iext_next(ifp, icur);
2436 xfs_iext_insert(ip, icur, &r[1], state);
2437 xfs_iext_insert(ip, icur, &r[0], state);
2439 XFS_IFORK_NEXT_SET(ip, whichfork,
2440 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2442 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2444 rval = XFS_ILOG_CORE;
2445 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2448 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2449 /* new right extent - oldext */
2450 error = xfs_bmbt_update(cur, &r[1]);
2453 /* new left extent - oldext */
2454 cur->bc_rec.b = PREV;
2455 if ((error = xfs_btree_insert(cur, &i)))
2457 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2459 * Reset the cursor to the position of the new extent
2460 * we are about to insert as we can't trust it after
2461 * the previous insert.
2463 error = xfs_bmbt_lookup_eq(cur, new, &i);
2466 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2467 /* new middle extent - newext */
2468 if ((error = xfs_btree_insert(cur, &i)))
2470 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2474 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2475 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2476 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2477 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2478 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2479 case BMAP_LEFT_CONTIG:
2480 case BMAP_RIGHT_CONTIG:
2482 * These cases are all impossible.
2487 /* update reverse mappings */
2488 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2492 /* convert to a btree if necessary */
2493 if (xfs_bmap_needs_btree(ip, whichfork)) {
2494 int tmp_logflags; /* partial log flag return val */
2496 ASSERT(cur == NULL);
2497 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2498 0, &tmp_logflags, whichfork);
2499 *logflagsp |= tmp_logflags;
2504 /* clear out the allocated field, done with it now in any case. */
2506 cur->bc_private.b.allocated = 0;
2510 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
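
/*
 * The default case above converts the middle of an oldext extent and splits
 * one record into three.  A hedged, standalone sketch of that arithmetic
 * follows; struct irec and split_middle() are illustrative names, not
 * kernel APIs.
 */
#include <assert.h>
#include <stdint.h>

struct irec {
	uint64_t	startoff;	/* file offset, in blocks */
	uint64_t	startblock;	/* disk block */
	uint64_t	blockcount;	/* length, in blocks */
	int		unwritten;	/* 1 = unwritten, 0 = written */
};

/*
 * Split @prev into three pieces when @new lies strictly inside it: the
 * outer pieces keep @prev's state, the middle piece is @new.  The pieces
 * share @prev's single physical extent, so the right piece starts at
 * new->startblock + new->blockcount, as in the code above.
 */
static void
split_middle(
	const struct irec	*prev,
	const struct irec	*new,
	struct irec		piece[3])
{
	uint64_t		new_end = new->startoff + new->blockcount;

	assert(prev->startoff < new->startoff);
	assert(new_end < prev->startoff + prev->blockcount);

	piece[0] = *prev;
	piece[0].blockcount = new->startoff - prev->startoff;

	piece[1] = *new;

	piece[2].startoff = new_end;
	piece[2].startblock = new->startblock + new->blockcount;
	piece[2].blockcount = prev->startoff + prev->blockcount - new_end;
	piece[2].unwritten = prev->unwritten;
}

/*
 * Example: prev = {off 0, blk 100, len 10}, new = {off 4, blk 104, len 2}
 * yields {off 0, blk 100, len 4}, {off 4, blk 104, len 2} and
 * {off 6, blk 106, len 4}.
 */
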
2520 * Convert a hole to a delayed allocation.
2523 xfs_bmap_add_extent_hole_delay(
2524 xfs_inode_t *ip, /* incore inode pointer */
2526 struct xfs_iext_cursor *icur,
2527 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2529 xfs_ifork_t *ifp; /* inode fork pointer */
2530 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2531 xfs_filblks_t newlen=0; /* new indirect size */
2532 xfs_filblks_t oldlen=0; /* old indirect size */
2533 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2534 int state = xfs_bmap_fork_to_state(whichfork);
2535 xfs_filblks_t temp; /* temp for indirect calculations */
2537 ifp = XFS_IFORK_PTR(ip, whichfork);
2538 ASSERT(isnullstartblock(new->br_startblock));
2541 * Check and set flags if this segment has a left neighbor
2543 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2544 state |= BMAP_LEFT_VALID;
2545 if (isnullstartblock(left.br_startblock))
2546 state |= BMAP_LEFT_DELAY;
2550 * Check and set flags if the current (right) segment exists.
2551 * If it doesn't exist, we're converting the hole at end-of-file.
2553 if (xfs_iext_get_extent(ifp, icur, &right)) {
2554 state |= BMAP_RIGHT_VALID;
2555 if (isnullstartblock(right.br_startblock))
2556 state |= BMAP_RIGHT_DELAY;
2560 * Set contiguity flags on the left and right neighbors.
2561 * Don't let extents get too large, even if the pieces are contiguous.
2563 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2564 left.br_startoff + left.br_blockcount == new->br_startoff &&
2565 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2566 state |= BMAP_LEFT_CONTIG;
2568 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2569 new->br_startoff + new->br_blockcount == right.br_startoff &&
2570 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2571 (!(state & BMAP_LEFT_CONTIG) ||
2572 (left.br_blockcount + new->br_blockcount +
2573 right.br_blockcount <= MAXEXTLEN)))
2574 state |= BMAP_RIGHT_CONTIG;
2577 * Switch out based on the contiguity flags.
2579 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2580 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2582 * New allocation is contiguous with delayed allocations
2583 * on the left and on the right.
2584 * Merge all three into a single extent record.
2586 temp = left.br_blockcount + new->br_blockcount +
2587 right.br_blockcount;
2589 oldlen = startblockval(left.br_startblock) +
2590 startblockval(new->br_startblock) +
2591 startblockval(right.br_startblock);
2592 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2594 left.br_startblock = nullstartblock(newlen);
2595 left.br_blockcount = temp;
2597 xfs_iext_remove(ip, icur, state);
2598 xfs_iext_prev(ifp, icur);
2599 xfs_iext_update_extent(ip, state, icur, &left);
2602 case BMAP_LEFT_CONTIG:
2604 * New allocation is contiguous with a delayed allocation
2606 * Merge the new allocation with the left neighbor.
2608 temp = left.br_blockcount + new->br_blockcount;
2610 oldlen = startblockval(left.br_startblock) +
2611 startblockval(new->br_startblock);
2612 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2614 left.br_blockcount = temp;
2615 left.br_startblock = nullstartblock(newlen);
2617 xfs_iext_prev(ifp, icur);
2618 xfs_iext_update_extent(ip, state, icur, &left);
2621 case BMAP_RIGHT_CONTIG:
2623 * New allocation is contiguous with a delayed allocation
2625 * Merge the new allocation with the right neighbor.
2627 temp = new->br_blockcount + right.br_blockcount;
2628 oldlen = startblockval(new->br_startblock) +
2629 startblockval(right.br_startblock);
2630 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2632 right.br_startoff = new->br_startoff;
2633 right.br_startblock = nullstartblock(newlen);
2634 right.br_blockcount = temp;
2635 xfs_iext_update_extent(ip, state, icur, &right);
2640 * New allocation is not contiguous with another
2641 * delayed allocation.
2642 * Insert a new entry.
2644 oldlen = newlen = 0;
2645 xfs_iext_insert(ip, icur, new, state);
2648 if (oldlen != newlen) {
2649 ASSERT(oldlen > newlen);
2650 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2653 * Nothing to do for disk quota accounting here.
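
/*
 * When delalloc extents merge above, the worst-case indirect-block
 * reservation is recomputed for the combined length and the excess
 * (oldlen - newlen) is returned to the free-block counter.  A hedged
 * sketch follows; worst_indlen() is a toy stand-in for
 * xfs_bmap_worst_indlen() and the other names are illustrative.
 */
#include <stdint.h>

/*
 * Toy stand-in for xfs_bmap_worst_indlen(): assume up to 128 records per
 * bmap btree block, one block per level.  The real function uses the
 * filesystem's actual btree geometry; any monotonic estimate works here.
 */
static uint64_t
worst_indlen(
	uint64_t	len)
{
	uint64_t	blocks = 0;

	do {
		len = (len + 127) / 128;
		blocks += len;
	} while (len > 1);
	return blocks;
}

/*
 * Merge two adjacent delalloc reservations of @left_len and @new_len data
 * blocks carrying @left_ind and @new_ind reserved indirect blocks.
 * Returns the indirect blocks that can be handed back to the free-space
 * counter (oldlen - newlen above); *merged_ind is the reservation kept on
 * the combined extent, encoded via nullstartblock() in the kernel.
 */
static uint64_t
merge_delalloc_indlen(
	uint64_t	left_len,
	uint64_t	left_ind,
	uint64_t	new_len,
	uint64_t	new_ind,
	uint64_t	*merged_ind)
{
	uint64_t	oldlen = left_ind + new_ind;
	uint64_t	newlen = worst_indlen(left_len + new_len);

	if (newlen > oldlen)
		newlen = oldlen;	/* never reserve more than we already hold */
	*merged_ind = newlen;
	return oldlen - newlen;
}
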
2659 * Convert a hole to a real allocation.
2661 STATIC int /* error */
2662 xfs_bmap_add_extent_hole_real(
2663 struct xfs_trans *tp,
2664 struct xfs_inode *ip,
2666 struct xfs_iext_cursor *icur,
2667 struct xfs_btree_cur **curp,
2668 struct xfs_bmbt_irec *new,
2669 xfs_fsblock_t *first,
2670 struct xfs_defer_ops *dfops,
2673 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2674 struct xfs_mount *mp = ip->i_mount;
2675 struct xfs_btree_cur *cur = *curp;
2676 int error; /* error return value */
2677 int i; /* temp state */
2678 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2679 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2680 int rval=0; /* return value (logging flags) */
2681 int state = xfs_bmap_fork_to_state(whichfork);
2682 struct xfs_bmbt_irec old;
2684 ASSERT(!isnullstartblock(new->br_startblock));
2685 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2687 XFS_STATS_INC(mp, xs_add_exlist);
2690 * Check and set flags if this segment has a left neighbor.
2692 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2693 state |= BMAP_LEFT_VALID;
2694 if (isnullstartblock(left.br_startblock))
2695 state |= BMAP_LEFT_DELAY;
2699 * Check and set flags if this segment has a current value.
2700 * Not true if we're inserting into the "hole" at eof.
2702 if (xfs_iext_get_extent(ifp, icur, &right)) {
2703 state |= BMAP_RIGHT_VALID;
2704 if (isnullstartblock(right.br_startblock))
2705 state |= BMAP_RIGHT_DELAY;
2709 * We're inserting a real allocation between "left" and "right".
2710 * Set the contiguity flags. Don't let extents get too large.
2712 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2713 left.br_startoff + left.br_blockcount == new->br_startoff &&
2714 left.br_startblock + left.br_blockcount == new->br_startblock &&
2715 left.br_state == new->br_state &&
2716 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2717 state |= BMAP_LEFT_CONTIG;
2719 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2720 new->br_startoff + new->br_blockcount == right.br_startoff &&
2721 new->br_startblock + new->br_blockcount == right.br_startblock &&
2722 new->br_state == right.br_state &&
2723 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2724 (!(state & BMAP_LEFT_CONTIG) ||
2725 left.br_blockcount + new->br_blockcount +
2726 right.br_blockcount <= MAXEXTLEN))
2727 state |= BMAP_RIGHT_CONTIG;
2731 * Select which case we're in here, and implement it.
2733 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2734 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2736 * New allocation is contiguous with real allocations on the
2737 * left and on the right.
2738 * Merge all three into a single extent record.
2740 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2742 xfs_iext_remove(ip, icur, state);
2743 xfs_iext_prev(ifp, icur);
2744 xfs_iext_update_extent(ip, state, icur, &left);
2746 XFS_IFORK_NEXT_SET(ip, whichfork,
2747 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2749 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2751 rval = XFS_ILOG_CORE;
2752 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2755 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2756 error = xfs_btree_delete(cur, &i);
2759 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2760 error = xfs_btree_decrement(cur, 0, &i);
2763 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2764 error = xfs_bmbt_update(cur, &left);
2770 case BMAP_LEFT_CONTIG:
2772 * New allocation is contiguous with a real allocation
2774 * Merge the new allocation with the left neighbor.
2777 left.br_blockcount += new->br_blockcount;
2779 xfs_iext_prev(ifp, icur);
2780 xfs_iext_update_extent(ip, state, icur, &left);
2783 rval = xfs_ilog_fext(whichfork);
2786 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2789 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2790 error = xfs_bmbt_update(cur, &left);
2796 case BMAP_RIGHT_CONTIG:
2798 * New allocation is contiguous with a real allocation
2800 * Merge the new allocation with the right neighbor.
2804 right.br_startoff = new->br_startoff;
2805 right.br_startblock = new->br_startblock;
2806 right.br_blockcount += new->br_blockcount;
2807 xfs_iext_update_extent(ip, state, icur, &right);
2810 rval = xfs_ilog_fext(whichfork);
2813 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2816 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2817 error = xfs_bmbt_update(cur, &right);
2825 * New allocation is not contiguous with another
2827 * Insert a new entry.
2829 xfs_iext_insert(ip, icur, new, state);
2830 XFS_IFORK_NEXT_SET(ip, whichfork,
2831 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2833 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2835 rval = XFS_ILOG_CORE;
2836 error = xfs_bmbt_lookup_eq(cur, new, &i);
2839 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2840 error = xfs_btree_insert(cur, &i);
2843 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2848 /* add reverse mapping */
2849 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
2853 /* convert to a btree if necessary */
2854 if (xfs_bmap_needs_btree(ip, whichfork)) {
2855 int tmp_logflags; /* partial log flag return val */
2857 ASSERT(cur == NULL);
2858 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
2859 0, &tmp_logflags, whichfork);
2860 *logflagsp |= tmp_logflags;
2866 /* clear out the allocated field, done with it now in any case. */
2868 cur->bc_private.b.allocated = 0;
2870 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2877 * Functions used in the extent read, allocate and remove paths
2881 * Adjust the size of the new extent based on di_extsize and rt extsize.
2884 xfs_bmap_extsize_align(
2886 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2887 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2888 xfs_extlen_t extsz, /* align to this extent size */
2889 int rt, /* is this a realtime inode? */
2890 int eof, /* is extent at end-of-file? */
2891 int delay, /* creating delalloc extent? */
2892 int convert, /* overwriting unwritten extent? */
2893 xfs_fileoff_t *offp, /* in/out: aligned offset */
2894 xfs_extlen_t *lenp) /* in/out: aligned length */
2896 xfs_fileoff_t orig_off; /* original offset */
2897 xfs_extlen_t orig_alen; /* original length */
2898 xfs_fileoff_t orig_end; /* original off+len */
2899 xfs_fileoff_t nexto; /* next file offset */
2900 xfs_fileoff_t prevo; /* previous file offset */
2901 xfs_fileoff_t align_off; /* temp for offset */
2902 xfs_extlen_t align_alen; /* temp for length */
2903 xfs_extlen_t temp; /* temp for calculations */
2908 orig_off = align_off = *offp;
2909 orig_alen = align_alen = *lenp;
2910 orig_end = orig_off + orig_alen;
2913 * If this request overlaps an existing extent, then don't
2914 * attempt to perform any additional alignment.
2916 if (!delay && !eof &&
2917 (orig_off >= gotp->br_startoff) &&
2918 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2923 * If the file offset is unaligned vs. the extent size
2924 * we need to align it. This will be possible unless
2925 * the file was previously written with a kernel that didn't
2926 * perform this alignment, or if a truncate shot us in the foot.
2929 temp = do_mod(orig_off, extsz);
2935 /* Same adjustment for the end of the requested area. */
2936 temp = (align_alen % extsz);
2938 align_alen += extsz - temp;
2941 * For large extent hint sizes, the aligned extent might be larger than
2942 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2943 * the length back under MAXEXTLEN. The outer allocation loops handle
2944 * short allocation just fine, so it is safe to do this. We only want to
2945 * do it when we are forced to, though, because it means more allocation
2946 * operations are required.
2948 while (align_alen > MAXEXTLEN)
2949 align_alen -= extsz;
2950 ASSERT(align_alen <= MAXEXTLEN);
2953 * If the previous block overlaps with this proposed allocation
2954 * then move the start forward without adjusting the length.
2956 if (prevp->br_startoff != NULLFILEOFF) {
2957 if (prevp->br_startblock == HOLESTARTBLOCK)
2958 prevo = prevp->br_startoff;
2960 prevo = prevp->br_startoff + prevp->br_blockcount;
2963 if (align_off != orig_off && align_off < prevo)
2966 * If the next block overlaps with this proposed allocation
2967 * then move the start back without adjusting the length,
2968 * but not before offset 0.
2969 * This may of course make the start overlap previous block,
2970 * and if we hit the offset 0 limit then the next block
2971 * can still overlap too.
2973 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2974 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2975 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2976 nexto = gotp->br_startoff + gotp->br_blockcount;
2978 nexto = gotp->br_startoff;
2980 nexto = NULLFILEOFF;
2982 align_off + align_alen != orig_end &&
2983 align_off + align_alen > nexto)
2984 align_off = nexto > align_alen ? nexto - align_alen : 0;
2986 * If we're now overlapping the next or previous extent that
2987 * means we can't fit an extsz piece in this hole. Just move
2988 * the start forward to the first valid spot and set
2989 * the length so we hit the end.
2991 if (align_off != orig_off && align_off < prevo)
2993 if (align_off + align_alen != orig_end &&
2994 align_off + align_alen > nexto &&
2995 nexto != NULLFILEOFF) {
2996 ASSERT(nexto > prevo);
2997 align_alen = nexto - align_off;
3001 * If realtime, and the result isn't a multiple of the realtime
3002 * extent size we need to remove blocks until it is.
3004 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3006 * We're not covering the original request, or
3007 * we won't be able to once we fix the length.
3009 if (orig_off < align_off ||
3010 orig_end > align_off + align_alen ||
3011 align_alen - temp < orig_alen)
3014 * Try to fix it by moving the start up.
3016 if (align_off + temp <= orig_off) {
3021 * Try to fix it by moving the end in.
3023 else if (align_off + align_alen - temp >= orig_end)
3026 * Set the start to the minimum then trim the length.
3029 align_alen -= orig_off - align_off;
3030 align_off = orig_off;
3031 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3034 * Result doesn't cover the request, fail it.
3036 if (orig_off < align_off || orig_end > align_off + align_alen)
3039 ASSERT(orig_off >= align_off);
3040 /* see MAXEXTLEN handling above */
3041 ASSERT(orig_end <= align_off + align_alen ||
3042 align_alen + extsz > MAXEXTLEN);
3046 if (!eof && gotp->br_startoff != NULLFILEOFF)
3047 ASSERT(align_off + align_alen <= gotp->br_startoff);
3048 if (prevp->br_startoff != NULLFILEOFF)
3049 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
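
/*
 * A hedged sketch of the first step of xfs_bmap_extsize_align() above:
 * round the start down to the extent size hint, round the end up, then
 * trim whole hint units while the result exceeds the maximum extent
 * length.  It assumes 0 < extsz <= MAXEXTLEN and omits the neighbour
 * overlap and realtime trimming that follow in the real function;
 * align_to_hint() is an illustrative name.
 */
#include <stdint.h>

#define SK_MAXEXTLEN	((1ULL << 21) - 1)	/* assumed max extent length (blocks) */

/*
 * Round [*off, *off + *len) outwards to multiples of @extsz, then trim
 * whole hint units off the end until the length fits in one extent record.
 */
static void
align_to_hint(
	uint64_t	extsz,
	uint64_t	*off,
	uint64_t	*len)
{
	uint64_t	orig_off = *off;
	uint64_t	orig_end = *off + *len;
	uint64_t	temp;

	if (!extsz)
		return;

	temp = orig_off % extsz;
	if (temp)
		*off = orig_off - temp;			/* round the start down */

	temp = (orig_end - *off) % extsz;
	*len = orig_end - *off + (temp ? extsz - temp : 0);	/* round the end up */

	while (*len > SK_MAXEXTLEN)
		*len -= extsz;				/* trim whole hint units */
}
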
3057 #define XFS_ALLOC_GAP_UNITS 4
3061 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3063 xfs_fsblock_t adjust; /* adjustment to block numbers */
3064 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3065 xfs_mount_t *mp; /* mount point structure */
3066 int nullfb; /* true if ap->firstblock isn't set */
3067 int rt; /* true if inode is realtime */
3069 #define ISVALID(x,y) \
3071 (x) < mp->m_sb.sb_rblocks : \
3072 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3073 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3074 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3076 mp = ap->ip->i_mount;
3077 nullfb = *ap->firstblock == NULLFSBLOCK;
3078 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3079 xfs_alloc_is_userdata(ap->datatype);
3080 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3082 * If allocating at eof, and there's a previous real block,
3083 * try to use its last block as our starting point.
3085 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3086 !isnullstartblock(ap->prev.br_startblock) &&
3087 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3088 ap->prev.br_startblock)) {
3089 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3091 * Adjust for the gap between prevp and us.
3093 adjust = ap->offset -
3094 (ap->prev.br_startoff + ap->prev.br_blockcount);
3096 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3097 ap->blkno += adjust;
3100 * If not at eof, then compare the two neighbor blocks.
3101 * Figure out whether either one gives us a good starting point,
3102 * and pick the better one.
3104 else if (!ap->eof) {
3105 xfs_fsblock_t gotbno; /* right side block number */
3106 xfs_fsblock_t gotdiff=0; /* right side difference */
3107 xfs_fsblock_t prevbno; /* left side block number */
3108 xfs_fsblock_t prevdiff=0; /* left side difference */
3111 * If there's a previous (left) block, select a requested
3112 * start block based on it.
3114 if (ap->prev.br_startoff != NULLFILEOFF &&
3115 !isnullstartblock(ap->prev.br_startblock) &&
3116 (prevbno = ap->prev.br_startblock +
3117 ap->prev.br_blockcount) &&
3118 ISVALID(prevbno, ap->prev.br_startblock)) {
3120 * Calculate gap to end of previous block.
3122 adjust = prevdiff = ap->offset -
3123 (ap->prev.br_startoff +
3124 ap->prev.br_blockcount);
3126 * Figure the startblock based on the previous block's
3127 * end and the gap size.
3129 * If the gap is large relative to the piece we're
3130 * allocating, or using it gives us an invalid block
3131 * number, then just use the end of the previous block.
3133 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3134 ISVALID(prevbno + prevdiff,
3135 ap->prev.br_startblock))
3140 * If the firstblock forbids it, can't use it,
3143 if (!rt && !nullfb &&
3144 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3145 prevbno = NULLFSBLOCK;
3148 * No previous block or can't follow it, just default.
3151 prevbno = NULLFSBLOCK;
3153 * If there's a following (right) block, select a requested
3154 * start block based on it.
3156 if (!isnullstartblock(ap->got.br_startblock)) {
3158 * Calculate gap to start of next block.
3160 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3162 * Figure the startblock based on the next block's
3163 * start and the gap size.
3165 gotbno = ap->got.br_startblock;
3168 * If the gap is large relative to the piece we're
3169 * allocating, or using it gives us an invalid block
3170 * number, then just use the start of the next block
3171 * offset by our length.
3173 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3174 ISVALID(gotbno - gotdiff, gotbno))
3176 else if (ISVALID(gotbno - ap->length, gotbno)) {
3177 gotbno -= ap->length;
3178 gotdiff += adjust - ap->length;
3182 * If the firstblock forbids it, can't use it,
3185 if (!rt && !nullfb &&
3186 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3187 gotbno = NULLFSBLOCK;
3190 * No next block, just default.
3193 gotbno = NULLFSBLOCK;
3195 * If both valid, pick the better one, else the only good
3196 * one, else ap->blkno is already set (to 0 or the inode block).
3198 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3199 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3200 else if (prevbno != NULLFSBLOCK)
3201 ap->blkno = prevbno;
3202 else if (gotbno != NULLFSBLOCK)
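
/*
 * A hedged sketch of the !eof branch of xfs_bmap_adjacent() above: derive
 * one candidate block from the end of the previous extent and another from
 * the start of the next one, penalise a candidate whose file-offset gap
 * exceeds XFS_ALLOC_GAP_UNITS times the allocation length, and pick the
 * candidate with the smaller gap.  AG validity checks (ISVALID/firstblock)
 * are omitted; pick_adjacent_blkno() is an illustrative name.
 */
#include <stdint.h>

#define SK_GAP_UNITS	4			/* as XFS_ALLOC_GAP_UNITS above */
#define SK_NULLBLOCK	((uint64_t)-1)

/*
 * Pick a target disk block for an allocation of @len blocks at file offset
 * @off.  @prev_end_off/@prev_end_blk describe the first offset/block past
 * the previous extent, @next_off/@next_blk the start of the next one
 * (SK_NULLBLOCK if absent).
 */
static uint64_t
pick_adjacent_blkno(
	uint64_t	off,
	uint64_t	len,
	uint64_t	prev_end_off,
	uint64_t	prev_end_blk,
	uint64_t	next_off,
	uint64_t	next_blk)
{
	uint64_t	prevbno = SK_NULLBLOCK, prevdiff = 0;
	uint64_t	gotbno = SK_NULLBLOCK, gotdiff = 0;

	if (prev_end_blk != SK_NULLBLOCK) {
		prevdiff = off - prev_end_off;		/* file-offset gap after prev */
		if (prevdiff <= SK_GAP_UNITS * len) {
			prevbno = prev_end_blk + prevdiff; /* keep the same gap on disk */
		} else {
			prevbno = prev_end_blk;		/* too far: go right after prev */
			prevdiff += prevdiff;		/* and penalise this candidate */
		}
	}
	if (next_blk != SK_NULLBLOCK) {
		gotdiff = next_off - off;		/* file-offset gap before next */
		if (gotdiff <= SK_GAP_UNITS * len) {
			gotbno = next_blk - gotdiff;	/* keep the same gap on disk */
		} else if (next_blk >= len) {
			gotbno = next_blk - len;	/* too far: end right before next */
			gotdiff += gotdiff - len;
		}
	}

	/* both valid: pick the smaller gap; else whichever exists */
	if (prevbno != SK_NULLBLOCK && gotbno != SK_NULLBLOCK)
		return prevdiff <= gotdiff ? prevbno : gotbno;
	if (prevbno != SK_NULLBLOCK)
		return prevbno;
	return gotbno;			/* may be SK_NULLBLOCK: caller keeps its default */
}
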
3209 xfs_bmap_longest_free_extent(
3210 struct xfs_trans *tp,
3215 struct xfs_mount *mp = tp->t_mountp;
3216 struct xfs_perag *pag;
3217 xfs_extlen_t longest;
3220 pag = xfs_perag_get(mp, ag);
3221 if (!pag->pagf_init) {
3222 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3226 if (!pag->pagf_init) {
3232 longest = xfs_alloc_longest_free_extent(pag,
3233 xfs_alloc_min_freelist(mp, pag),
3234 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3235 if (*blen < longest)
3244 xfs_bmap_select_minlen(
3245 struct xfs_bmalloca *ap,
3246 struct xfs_alloc_arg *args,
3250 if (notinit || *blen < ap->minlen) {
3252 * Since the AGF scan above used XFS_ALLOC_FLAG_TRYLOCK, it is possible that
3253 * there is space for this request.
3255 args->minlen = ap->minlen;
3256 } else if (*blen < args->maxlen) {
3258 * If the best seen length is less than the request length,
3259 * use the best as the minimum.
3261 args->minlen = *blen;
3264 * Otherwise we've seen an extent as big as maxlen, use that
3267 args->minlen = args->maxlen;
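
/*
 * A hedged restatement of the decision in xfs_bmap_select_minlen() above;
 * select_minlen() is an illustrative name, not a kernel API.
 */
#include <stdbool.h>
#include <stdint.h>

/*
 * @want is the full request, @need the smallest acceptable allocation,
 * @best_seen the longest free extent found while scanning AGs, and
 * @notinit is set when some AG headers could not be read (trylock), so
 * @best_seen may be an underestimate.
 */
static uint64_t
select_minlen(
	uint64_t	want,
	uint64_t	need,
	uint64_t	best_seen,
	bool		notinit)
{
	if (notinit || best_seen < need)
		return need;		/* there may still be space: ask for the minimum */
	if (best_seen < want)
		return best_seen;	/* the best we saw is the most we can hope for */
	return want;			/* a maximally sized extent is available */
}
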
3272 xfs_bmap_btalloc_nullfb(
3273 struct xfs_bmalloca *ap,
3274 struct xfs_alloc_arg *args,
3277 struct xfs_mount *mp = ap->ip->i_mount;
3278 xfs_agnumber_t ag, startag;
3282 args->type = XFS_ALLOCTYPE_START_BNO;
3283 args->total = ap->total;
3285 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3286 if (startag == NULLAGNUMBER)
3289 while (*blen < args->maxlen) {
3290 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3295 if (++ag == mp->m_sb.sb_agcount)
3301 xfs_bmap_select_minlen(ap, args, blen, notinit);
3306 xfs_bmap_btalloc_filestreams(
3307 struct xfs_bmalloca *ap,
3308 struct xfs_alloc_arg *args,
3311 struct xfs_mount *mp = ap->ip->i_mount;
3316 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3317 args->total = ap->total;
3319 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3320 if (ag == NULLAGNUMBER)
3323 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3327 if (*blen < args->maxlen) {
3328 error = xfs_filestream_new_ag(ap, &ag);
3332 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3339 xfs_bmap_select_minlen(ap, args, blen, notinit);
3342 * Set the failure fallback case to look in the selected AG as stream
3345 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3349 /* Update all inode and quota accounting for the allocation we just did. */
3351 xfs_bmap_btalloc_accounting(
3352 struct xfs_bmalloca *ap,
3353 struct xfs_alloc_arg *args)
3355 if (ap->flags & XFS_BMAPI_COWFORK) {
3357 * COW fork blocks are in-core only and thus are treated as
3358 * in-core quota reservation (like delalloc blocks) even when
3359 * converted to real blocks. The quota reservation is not
3360 * accounted to disk until blocks are remapped to the data
3361 * fork. So if these blocks were previously delalloc, we
3362 * already have quota reservation and there's nothing to do
3369 * Otherwise, we've allocated blocks in a hole. The transaction
3370 * has acquired in-core quota reservation for this extent.
3371 * Rather than account these as real blocks, however, we reduce
3372 * the transaction quota reservation based on the allocation.
3373 * This essentially transfers the transaction quota reservation
3374 * to that of a delalloc extent.
3376 ap->ip->i_delayed_blks += args->len;
3377 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3382 /* data/attr fork only */
3383 ap->ip->i_d.di_nblocks += args->len;
3384 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3386 ap->ip->i_delayed_blks -= args->len;
3387 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3388 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3394 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3396 xfs_mount_t *mp; /* mount point structure */
3397 xfs_alloctype_t atype = 0; /* type for allocation routines */
3398 xfs_extlen_t align = 0; /* minimum allocation alignment */
3399 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3401 xfs_alloc_arg_t args;
3402 xfs_fileoff_t orig_offset;
3403 xfs_extlen_t orig_length;
3405 xfs_extlen_t nextminlen = 0;
3406 int nullfb; /* true if ap->firstblock isn't set */
3413 orig_offset = ap->offset;
3414 orig_length = ap->length;
3416 mp = ap->ip->i_mount;
3418 /* stripe alignment for allocation is determined by mount parameters */
3420 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3421 stripe_align = mp->m_swidth;
3422 else if (mp->m_dalign)
3423 stripe_align = mp->m_dalign;
3425 if (ap->flags & XFS_BMAPI_COWFORK)
3426 align = xfs_get_cowextsz_hint(ap->ip);
3427 else if (xfs_alloc_is_userdata(ap->datatype))
3428 align = xfs_get_extsz_hint(ap->ip);
3430 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3431 align, 0, ap->eof, 0, ap->conv,
3432 &ap->offset, &ap->length);
3438 nullfb = *ap->firstblock == NULLFSBLOCK;
3439 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3441 if (xfs_alloc_is_userdata(ap->datatype) &&
3442 xfs_inode_is_filestream(ap->ip)) {
3443 ag = xfs_filestream_lookup_ag(ap->ip);
3444 ag = (ag != NULLAGNUMBER) ? ag : 0;
3445 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3447 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3450 ap->blkno = *ap->firstblock;
3452 xfs_bmap_adjacent(ap);
3455 * If allowed, use ap->blkno; otherwise must use firstblock since
3456 * it's in the right allocation group.
3458 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3461 ap->blkno = *ap->firstblock;
3463 * Normal allocation, done through xfs_alloc_vextent.
3465 tryagain = isaligned = 0;
3466 memset(&args, 0, sizeof(args));
3469 args.fsbno = ap->blkno;
3470 xfs_rmap_skip_owner_update(&args.oinfo);
3472 /* Trim the allocation back to the maximum an AG can fit. */
3473 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3474 args.firstblock = *ap->firstblock;
3478 * Search for an allocation group with a single extent large
3479 * enough for the request. If one isn't found, then adjust
3480 * the minimum allocation size to the largest space found.
3482 if (xfs_alloc_is_userdata(ap->datatype) &&
3483 xfs_inode_is_filestream(ap->ip))
3484 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3486 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3489 } else if (ap->dfops->dop_low) {
3490 if (xfs_inode_is_filestream(ap->ip))
3491 args.type = XFS_ALLOCTYPE_FIRST_AG;
3493 args.type = XFS_ALLOCTYPE_START_BNO;
3494 args.total = args.minlen = ap->minlen;
3496 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3497 args.total = ap->total;
3498 args.minlen = ap->minlen;
3500 /* apply extent size hints if obtained earlier */
3503 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3504 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3505 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3509 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3510 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3511 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3514 * If we are not low on available data blocks, and the
3515 * underlying logical volume manager is a stripe, and
3516 * the file offset is zero then try to allocate data
3517 * blocks on stripe unit boundary.
3518 * NOTE: ap->aeof is only set if the allocation length
3519 * is >= the stripe unit and the allocation offset is
3520 * at the end of file.
3522 if (!ap->dfops->dop_low && ap->aeof) {
3524 args.alignment = stripe_align;
3528 * Adjust for alignment
3530 if (blen > args.alignment && blen <= args.maxlen)
3531 args.minlen = blen - args.alignment;
3532 args.minalignslop = 0;
3535 * First try an exact bno allocation.
3536 * If it fails then do a near or start bno
3537 * allocation with alignment turned on.
3541 args.type = XFS_ALLOCTYPE_THIS_BNO;
3544 * Compute the minlen+alignment for the
3545 * next case. Set slop so that the value
3546 * of minlen+alignment+slop doesn't go up
3547 * between the calls.
3549 if (blen > stripe_align && blen <= args.maxlen)
3550 nextminlen = blen - stripe_align;
3552 nextminlen = args.minlen;
3553 if (nextminlen + stripe_align > args.minlen + 1)
3555 nextminlen + stripe_align -
3558 args.minalignslop = 0;
3562 args.minalignslop = 0;
3564 args.minleft = ap->minleft;
3565 args.wasdel = ap->wasdel;
3566 args.resv = XFS_AG_RESV_NONE;
3567 args.datatype = ap->datatype;
3568 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3571 error = xfs_alloc_vextent(&args);
3575 if (tryagain && args.fsbno == NULLFSBLOCK) {
3577 * Exact allocation failed. Now try with alignment
3581 args.fsbno = ap->blkno;
3582 args.alignment = stripe_align;
3583 args.minlen = nextminlen;
3584 args.minalignslop = 0;
3586 if ((error = xfs_alloc_vextent(&args)))
3589 if (isaligned && args.fsbno == NULLFSBLOCK) {
3591 * allocation failed, so turn off alignment and
3595 args.fsbno = ap->blkno;
3597 if ((error = xfs_alloc_vextent(&args)))
3600 if (args.fsbno == NULLFSBLOCK && nullfb &&
3601 args.minlen > ap->minlen) {
3602 args.minlen = ap->minlen;
3603 args.type = XFS_ALLOCTYPE_START_BNO;
3604 args.fsbno = ap->blkno;
3605 if ((error = xfs_alloc_vextent(&args)))
3608 if (args.fsbno == NULLFSBLOCK && nullfb) {
3610 args.type = XFS_ALLOCTYPE_FIRST_AG;
3611 args.total = ap->minlen;
3612 if ((error = xfs_alloc_vextent(&args)))
3614 ap->dfops->dop_low = true;
3616 if (args.fsbno != NULLFSBLOCK) {
3618 * check the allocation happened at the same or higher AG than
3619 * the first block that was allocated.
3621 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3622 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3623 XFS_FSB_TO_AGNO(mp, args.fsbno));
3625 ap->blkno = args.fsbno;
3626 if (*ap->firstblock == NULLFSBLOCK)
3627 *ap->firstblock = args.fsbno;
3628 ASSERT(nullfb || fb_agno <= args.agno);
3629 ap->length = args.len;
3631 * If the extent size hint is active, we tried to round the
3632 * caller's allocation request offset down to extsz and the
3633 * length up to another extsz boundary. If we found a free
3634 * extent we mapped it in starting at this new offset. If the
3635 * newly mapped space isn't long enough to cover any of the
3636 * range of offsets that was originally requested, move the
3637 * mapping up so that we can fill as much of the caller's
3638 * original request as possible. Free space is apparently
3639 * very fragmented so we're unlikely to be able to satisfy the hints anyway.
3642 if (ap->length <= orig_length)
3643 ap->offset = orig_offset;
3644 else if (ap->offset + ap->length < orig_offset + orig_length)
3645 ap->offset = orig_offset + orig_length - ap->length;
3646 xfs_bmap_btalloc_accounting(ap, &args);
3648 ap->blkno = NULLFSBLOCK;
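
/*
 * A hedged sketch of the offset fix-up above: if the extent size hint
 * rounded the request and the allocator returned fewer blocks than asked,
 * shift the mapping so it covers as much of the caller's original range as
 * possible.  fixup_short_alignment() is an illustrative name.
 */
#include <stdint.h>

/*
 * The caller originally asked for [orig_off, orig_off + orig_len); the
 * allocator mapped @alloc_len blocks at the possibly rounded-down offset
 * *off.  Move the mapping up so it overlaps as much of the original
 * request as possible.
 */
static void
fixup_short_alignment(
	uint64_t	orig_off,
	uint64_t	orig_len,
	uint64_t	alloc_len,
	uint64_t	*off)
{
	if (alloc_len <= orig_len)
		*off = orig_off;			/* cover the start of the request */
	else if (*off + alloc_len < orig_off + orig_len)
		*off = orig_off + orig_len - alloc_len;	/* cover the end of the request */
}
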
3655 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3656 * It figures out where to ask the underlying allocator to put the new extent.
3660 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3662 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3663 xfs_alloc_is_userdata(ap->datatype))
3664 return xfs_bmap_rtalloc(ap);
3665 return xfs_bmap_btalloc(ap);
3668 /* Trim extent to fit a logical block range. */
3671 struct xfs_bmbt_irec *irec,
3675 xfs_fileoff_t distance;
3676 xfs_fileoff_t end = bno + len;
3678 if (irec->br_startoff + irec->br_blockcount <= bno ||
3679 irec->br_startoff >= end) {
3680 irec->br_blockcount = 0;
3684 if (irec->br_startoff < bno) {
3685 distance = bno - irec->br_startoff;
3686 if (isnullstartblock(irec->br_startblock))
3687 irec->br_startblock = DELAYSTARTBLOCK;
3688 if (irec->br_startblock != DELAYSTARTBLOCK &&
3689 irec->br_startblock != HOLESTARTBLOCK)
3690 irec->br_startblock += distance;
3691 irec->br_startoff += distance;
3692 irec->br_blockcount -= distance;
3695 if (end < irec->br_startoff + irec->br_blockcount) {
3696 distance = irec->br_startoff + irec->br_blockcount - end;
3697 irec->br_blockcount -= distance;
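
/*
 * A hedged, standalone restatement of the trimming arithmetic above, for
 * real extents only (delalloc/hole start blocks are not shifted in the
 * kernel); struct irec and trim_extent() are illustrative names.
 */
#include <stdint.h>

struct irec {
	uint64_t	startoff;	/* file offset, in blocks */
	uint64_t	startblock;	/* disk block */
	uint64_t	blockcount;	/* length, in blocks */
};

/* Clamp @irec to the file range [bno, bno + len). */
static void
trim_extent(
	struct irec	*irec,
	uint64_t	bno,
	uint64_t	len)
{
	uint64_t	end = bno + len;
	uint64_t	distance;

	if (irec->startoff + irec->blockcount <= bno || irec->startoff >= end) {
		irec->blockcount = 0;			/* no overlap at all */
		return;
	}
	if (irec->startoff < bno) {			/* trim the front */
		distance = bno - irec->startoff;
		irec->startblock += distance;
		irec->startoff += distance;
		irec->blockcount -= distance;
	}
	if (end < irec->startoff + irec->blockcount) {	/* trim the tail */
		distance = irec->startoff + irec->blockcount - end;
		irec->blockcount -= distance;
	}
}

/*
 * Example: {off 10, blk 200, len 20} trimmed to bno 15, len 5 becomes
 * {off 15, blk 205, len 5}.
 */
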
3701 /* trim extent to within eof */
3703 xfs_trim_extent_eof(
3704 struct xfs_bmbt_irec *irec,
3705 struct xfs_inode *ip)
3708 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3709 i_size_read(VFS_I(ip))));
3713 * Trim the returned map to the required bounds
3717 struct xfs_bmbt_irec *mval,
3718 struct xfs_bmbt_irec *got,
3726 if ((flags & XFS_BMAPI_ENTIRE) ||
3727 got->br_startoff + got->br_blockcount <= obno) {
3729 if (isnullstartblock(got->br_startblock))
3730 mval->br_startblock = DELAYSTARTBLOCK;
3736 ASSERT((*bno >= obno) || (n == 0));
3738 mval->br_startoff = *bno;
3739 if (isnullstartblock(got->br_startblock))
3740 mval->br_startblock = DELAYSTARTBLOCK;
3742 mval->br_startblock = got->br_startblock +
3743 (*bno - got->br_startoff);
3745 * Return the minimum of what we got and what we asked for as
3746 * the length. We can use the len variable here because it is
3747 * modified below and we could have been there before coming
3748 * here if the first part of the allocation didn't overlap what was asked for.
3751 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3752 got->br_blockcount - (*bno - got->br_startoff));
3753 mval->br_state = got->br_state;
3754 ASSERT(mval->br_blockcount <= len);
3759 * Update and validate the extent map to return
3762 xfs_bmapi_update_map(
3763 struct xfs_bmbt_irec **map,
3771 xfs_bmbt_irec_t *mval = *map;
3773 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3774 ((mval->br_startoff + mval->br_blockcount) <= end));
3775 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3776 (mval->br_startoff < obno));
3778 *bno = mval->br_startoff + mval->br_blockcount;
3780 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3781 /* update previous map with new information */
3782 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3783 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3784 ASSERT(mval->br_state == mval[-1].br_state);
3785 mval[-1].br_blockcount = mval->br_blockcount;
3786 mval[-1].br_state = mval->br_state;
3787 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3788 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3789 mval[-1].br_startblock != HOLESTARTBLOCK &&
3790 mval->br_startblock == mval[-1].br_startblock +
3791 mval[-1].br_blockcount &&
3792 ((flags & XFS_BMAPI_IGSTATE) ||
3793 mval[-1].br_state == mval->br_state)) {
3794 ASSERT(mval->br_startoff ==
3795 mval[-1].br_startoff + mval[-1].br_blockcount);
3796 mval[-1].br_blockcount += mval->br_blockcount;
3797 } else if (*n > 0 &&
3798 mval->br_startblock == DELAYSTARTBLOCK &&
3799 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3800 mval->br_startoff ==
3801 mval[-1].br_startoff + mval[-1].br_blockcount) {
3802 mval[-1].br_blockcount += mval->br_blockcount;
3803 mval[-1].br_state = mval->br_state;
3804 } else if (!((*n == 0) &&
3805 ((mval->br_startoff + mval->br_blockcount) <=
3814 * Map file blocks to filesystem blocks without allocation.
3818 struct xfs_inode *ip,
3821 struct xfs_bmbt_irec *mval,
3825 struct xfs_mount *mp = ip->i_mount;
3826 struct xfs_ifork *ifp;
3827 struct xfs_bmbt_irec got;
3830 struct xfs_iext_cursor icur;
3834 int whichfork = xfs_bmapi_whichfork(flags);
3837 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3838 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
3839 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3841 if (unlikely(XFS_TEST_ERROR(
3842 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3843 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3844 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3845 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3846 return -EFSCORRUPTED;
3849 if (XFS_FORCED_SHUTDOWN(mp))
3852 XFS_STATS_INC(mp, xs_blk_mapr);
3854 ifp = XFS_IFORK_PTR(ip, whichfork);
3856 /* No CoW fork? Return a hole. */
3857 if (whichfork == XFS_COW_FORK && !ifp) {
3858 mval->br_startoff = bno;
3859 mval->br_startblock = HOLESTARTBLOCK;
3860 mval->br_blockcount = len;
3861 mval->br_state = XFS_EXT_NORM;
3866 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3867 error = xfs_iread_extents(NULL, ip, whichfork);
3872 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3877 while (bno < end && n < *nmap) {
3878 /* Reading past eof, act as though there's a hole up to end. */
3880 got.br_startoff = end;
3881 if (got.br_startoff > bno) {
3882 /* Reading in a hole. */
3883 mval->br_startoff = bno;
3884 mval->br_startblock = HOLESTARTBLOCK;
3885 mval->br_blockcount =
3886 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3887 mval->br_state = XFS_EXT_NORM;
3888 bno += mval->br_blockcount;
3889 len -= mval->br_blockcount;
3895 /* set up the extent map to return. */
3896 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3897 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3899 /* If we're done, stop now. */
3900 if (bno >= end || n >= *nmap)
3903 /* Else go on to the next record. */
3904 if (!xfs_iext_next_extent(ifp, &icur, &got))
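
/*
 * A hedged userspace sketch of the hole-vs-extent reporting loop in
 * xfs_bmapi_read() above: walk sorted, non-overlapping extent records and
 * emit mappings for [bno, bno + len), reporting gaps the way the kernel
 * reports HOLESTARTBLOCK.  report_mappings() and struct irec are
 * illustrative names.
 */
#include <stdint.h>
#include <stdio.h>

struct irec {
	uint64_t	startoff;	/* file offset, in blocks */
	uint64_t	startblock;	/* disk block */
	uint64_t	blockcount;	/* length, in blocks */
};

static void
report_mappings(
	const struct irec	*ext,
	int			nr,
	uint64_t		bno,
	uint64_t		len)
{
	uint64_t		end = bno + len;
	uint64_t		count;
	int			i = 0;

	while (bno < end) {
		/* skip extents that end at or before the current offset */
		while (i < nr && ext[i].startoff + ext[i].blockcount <= bno)
			i++;

		if (i == nr || ext[i].startoff >= end) {
			printf("hole off %llu len %llu\n",
			       (unsigned long long)bno,
			       (unsigned long long)(end - bno));
			return;
		}
		if (ext[i].startoff > bno) {		/* hole up to the next extent */
			printf("hole off %llu len %llu\n",
			       (unsigned long long)bno,
			       (unsigned long long)(ext[i].startoff - bno));
			bno = ext[i].startoff;
		}
		count = ext[i].startoff + ext[i].blockcount;
		if (count > end)
			count = end;
		count -= bno;
		printf("map  off %llu blk %llu len %llu\n",
		       (unsigned long long)bno,
		       (unsigned long long)(ext[i].startblock + (bno - ext[i].startoff)),
		       (unsigned long long)count);
		bno += count;
	}
}
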
3912 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3913 * global pool and the extent inserted into the inode in-core extent tree.
3915 * On entry, got refers to the first extent beyond the offset of the extent to
3916 * allocate or eof is specified if no such extent exists. On return, got refers
3917 * to the extent record that was inserted to the inode fork.
3919 * Note that the allocated extent may have been merged with contiguous extents
3920 * during insertion into the inode fork. Thus, got does not reflect the current
3921 * state of the inode fork on return. If necessary, the caller can use the
3922 * iext cursor (icur) to look up the updated record in the inode fork.
3925 xfs_bmapi_reserve_delalloc(
3926 struct xfs_inode *ip,
3930 xfs_filblks_t prealloc,
3931 struct xfs_bmbt_irec *got,
3932 struct xfs_iext_cursor *icur,
3935 struct xfs_mount *mp = ip->i_mount;
3936 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3938 xfs_extlen_t indlen;
3940 xfs_fileoff_t aoff = off;
3943 * Cap the alloc length. Keep track of prealloc so we know whether to
3944 * tag the inode before we return.
3946 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3948 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3949 if (prealloc && alen >= len)
3950 prealloc = alen - len;
3952 /* Figure out the extent size, adjust alen */
3953 if (whichfork == XFS_COW_FORK) {
3954 struct xfs_bmbt_irec prev;
3955 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3957 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3958 prev.br_startoff = NULLFILEOFF;
3960 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3961 1, 0, &aoff, &alen);
3966 * Make a transaction-less quota reservation for delayed allocation
3967 * blocks. This number gets adjusted later. If the reservation fails we
3968 * can return the error directly, since nothing has been allocated yet.
3970 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3971 XFS_QMOPT_RES_REGBLKS);
3976 * Split changing sb for alen and indlen since they could be coming
3977 * from different places.
3979 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3982 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3984 goto out_unreserve_quota;
3986 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3988 goto out_unreserve_blocks;
3991 ip->i_delayed_blks += alen;
3993 got->br_startoff = aoff;
3994 got->br_startblock = nullstartblock(indlen);
3995 got->br_blockcount = alen;
3996 got->br_state = XFS_EXT_NORM;
3998 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4001 * Tag the inode if blocks were preallocated. Note that COW fork
4002 * preallocation can occur at the start or end of the extent, even when
4003 * prealloc == 0, so we must also check the aligned offset and length.
4005 if (whichfork == XFS_DATA_FORK && prealloc)
4006 xfs_inode_set_eofblocks_tag(ip);
4007 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4008 xfs_inode_set_cowblocks_tag(ip);
4012 out_unreserve_blocks:
4013 xfs_mod_fdblocks(mp, alen, false);
4014 out_unreserve_quota:
4015 if (XFS_IS_QUOTA_ON(mp))
4016 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
4017 XFS_QMOPT_RES_REGBLKS);
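
/*
 * A hedged sketch of the reservation ordering above: quota is reserved for
 * the data blocks first, then the data blocks and the worst-case indirect
 * blocks are taken from the free-space counter as two separate dips, and
 * the steps are unwound in reverse order on failure.  The indirect blocks
 * are not charged to quota here, mirroring the code above.  take()/put()
 * and the toy counters are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

/* Toy stand-ins for the quota pool and the filesystem free-space counter. */
static int64_t quota_avail = 1 << 20;
static int64_t fdblocks = 1 << 20;

static bool
take(int64_t *pool, int64_t nblocks)
{
	if (*pool < nblocks)
		return false;
	*pool -= nblocks;
	return true;
}

static void
put(int64_t *pool, int64_t nblocks)
{
	*pool += nblocks;
}

/*
 * Reserve @alen delalloc data blocks plus @indlen worst-case indirect
 * blocks: quota first, then two separate free-space dips, unwinding in
 * reverse order if any step fails.
 */
static int
reserve_delalloc(int64_t alen, int64_t indlen)
{
	if (!take(&quota_avail, alen))
		return -1;			/* -EDQUOT in the kernel */
	if (!take(&fdblocks, alen))
		goto out_unreserve_quota;
	if (!take(&fdblocks, indlen))
		goto out_unreserve_blocks;
	return 0;

out_unreserve_blocks:
	put(&fdblocks, alen);
out_unreserve_quota:
	put(&quota_avail, alen);
	return -1;				/* -ENOSPC in the kernel */
}
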
4023 struct xfs_bmalloca *bma)
4025 struct xfs_mount *mp = bma->ip->i_mount;
4026 int whichfork = xfs_bmapi_whichfork(bma->flags);
4027 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4028 int tmp_logflags = 0;
4031 ASSERT(bma->length > 0);
4034 * For the wasdelay case, we could also just allocate the stuff asked
4035 * for in this bmap call but that wouldn't be as good.
4038 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4039 bma->offset = bma->got.br_startoff;
4040 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4042 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4044 bma->length = XFS_FILBLKS_MIN(bma->length,
4045 bma->got.br_startoff - bma->offset);
4049 * Set the data type being allocated. For the data fork, the first data
4050 * in the file is treated differently to all other allocations. For the
4051 * attribute fork, we only need to ensure the allocated range is not on
4054 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4055 bma->datatype = XFS_ALLOC_NOBUSY;
4056 if (whichfork == XFS_DATA_FORK) {
4057 if (bma->offset == 0)
4058 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4060 bma->datatype |= XFS_ALLOC_USERDATA;
4062 if (bma->flags & XFS_BMAPI_ZERO)
4063 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4066 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4069 * Only want to do the alignment at the eof if it is userdata and
4070 * allocation length is larger than a stripe unit.
4072 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4073 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4074 error = xfs_bmap_isaeof(bma, whichfork);
4079 error = xfs_bmap_alloc(bma);
4084 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4085 if (bma->blkno == NULLFSBLOCK)
4087 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4088 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4089 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4090 bma->cur->bc_private.b.dfops = bma->dfops;
4093 * Bump the number of extents we've allocated
4099 bma->cur->bc_private.b.flags =
4100 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4102 bma->got.br_startoff = bma->offset;
4103 bma->got.br_startblock = bma->blkno;
4104 bma->got.br_blockcount = bma->length;
4105 bma->got.br_state = XFS_EXT_NORM;
4108 * In the data fork, a wasdelay extent has been initialized, so
4109 * shouldn't be flagged as unwritten.
4111 * For the cow fork, however, we convert delalloc reservations
4112 * (extents allocated for speculative preallocation) to
4113 * allocated unwritten extents, and only convert the unwritten
4114 * extents to real extents when we're about to write the data.
4116 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4117 (bma->flags & XFS_BMAPI_PREALLOC) &&
4118 xfs_sb_version_hasextflgbit(&mp->m_sb))
4119 bma->got.br_state = XFS_EXT_UNWRITTEN;
4122 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4124 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4125 whichfork, &bma->icur, &bma->cur, &bma->got,
4126 bma->firstblock, bma->dfops, &bma->logflags);
4128 bma->logflags |= tmp_logflags;
4133 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4134 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4135 * the neighbouring ones.
4137 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4139 ASSERT(bma->got.br_startoff <= bma->offset);
4140 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4141 bma->offset + bma->length);
4142 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4143 bma->got.br_state == XFS_EXT_UNWRITTEN);
4148 xfs_bmapi_convert_unwritten(
4149 struct xfs_bmalloca *bma,
4150 struct xfs_bmbt_irec *mval,
4154 int whichfork = xfs_bmapi_whichfork(flags);
4155 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4156 int tmp_logflags = 0;
4159 /* check if we need to do unwritten->real conversion */
4160 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4161 (flags & XFS_BMAPI_PREALLOC))
4164 /* check if we need to do real->unwritten conversion */
4165 if (mval->br_state == XFS_EXT_NORM &&
4166 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4167 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4171 * Modify (by adding) the state flag, if writing.
4173 ASSERT(mval->br_blockcount <= len);
4174 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4175 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4176 bma->ip, whichfork);
4177 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4178 bma->cur->bc_private.b.dfops = bma->dfops;
4180 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4181 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4184 * Before insertion into the bmbt, zero the range being converted
4187 if (flags & XFS_BMAPI_ZERO) {
4188 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4189 mval->br_blockcount);
4194 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4195 &bma->icur, &bma->cur, mval, bma->firstblock,
4196 bma->dfops, &tmp_logflags);
4198 * Log the inode core unconditionally in the unwritten extent conversion
4199 * path because the conversion might not have done so (e.g., if the
4200 * extent count hasn't changed). We need to make sure the inode is dirty
4201 * in the transaction for the sake of fsync(), even if nothing has
4202 * changed, because fsync() will not force the log for this transaction
4203 * unless it sees the inode pinned.
4205 * Note: If we're only converting cow fork extents, there aren't
4206 * any on-disk updates to make, so we don't need to log anything.
4208 if (whichfork != XFS_COW_FORK)
4209 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4214 * Update our extent pointer, given that
4215 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4216 * of the neighbouring ones.
4218 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4221 * We may have combined previously unwritten space with written space,
4222 * so generate another request.
4224 if (mval->br_blockcount < len)
4230 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4231 * extent state if necessary. Detailed behaviour is controlled by the flags
4232 * parameter. Only allocates blocks from a single allocation group, to avoid
4235 * The returned value in "firstblock" from the first call in a transaction
4236 * must be remembered and presented to subsequent calls in "firstblock".
4237 * An upper bound for the number of blocks to be allocated is supplied to
4238 * the first call in "total"; if no allocation group has that many free
4239 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4243 struct xfs_trans *tp, /* transaction pointer */
4244 struct xfs_inode *ip, /* incore inode */
4245 xfs_fileoff_t bno, /* starting file offs. mapped */
4246 xfs_filblks_t len, /* length to map in file */
4247 int flags, /* XFS_BMAPI_... */
4248 xfs_fsblock_t *firstblock, /* first allocated block
4249 controls a.g. for allocs */
4250 xfs_extlen_t total, /* total blocks needed */
4251 struct xfs_bmbt_irec *mval, /* output: map values */
4252 int *nmap, /* i/o: mval size/count */
4253 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4255 struct xfs_mount *mp = ip->i_mount;
4256 struct xfs_ifork *ifp;
4257 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4258 xfs_fileoff_t end; /* end of mapped file region */
4259 bool eof = false; /* after the end of extents */
4260 int error; /* error return */
4261 int n; /* current extent index */
4262 xfs_fileoff_t obno; /* old block number (offset) */
4263 int whichfork; /* data or attr fork */
4266 xfs_fileoff_t orig_bno; /* original block number value */
4267 int orig_flags; /* original flags arg value */
4268 xfs_filblks_t orig_len; /* original value of len arg */
4269 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4270 int orig_nmap; /* original value of *nmap */
4278 whichfork = xfs_bmapi_whichfork(flags);
4281 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4282 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4283 ASSERT(tp != NULL ||
4284 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4285 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4287 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4288 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4289 ASSERT(!(flags & XFS_BMAPI_REMAP));
4291 /* zeroing is currently only supported for data extents, not metadata */
4292 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4293 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4295 * we can allocate unwritten extents or pre-zero allocated blocks,
4296 * but it makes no sense to do both at once. This would result in
4297 * zeroing the unwritten extent twice, while it would still remain an
4298 * unwritten extent.
4300 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4301 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4303 if (unlikely(XFS_TEST_ERROR(
4304 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4305 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4306 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4307 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4308 return -EFSCORRUPTED;
4311 if (XFS_FORCED_SHUTDOWN(mp))
4314 ifp = XFS_IFORK_PTR(ip, whichfork);
4316 XFS_STATS_INC(mp, xs_blk_mapw);
4318 if (*firstblock == NULLFSBLOCK) {
4319 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4320 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4327 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4328 error = xfs_iread_extents(tp, ip, whichfork);
4337 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4339 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4340 bma.prev.br_startoff = NULLFILEOFF;
4346 bma.firstblock = firstblock;
4348 while (bno < end && n < *nmap) {
4349 bool need_alloc = false, wasdelay = false;
4351 /* in hole or beyond EOF? */
4352 if (eof || bma.got.br_startoff > bno) {
4354 * CoW fork conversions should /never/ hit EOF or
4355 * holes. There should always be something for us
4358 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4359 (flags & XFS_BMAPI_COWFORK)));
4361 if (flags & XFS_BMAPI_DELALLOC) {
4363			 * For the COW fork we can reasonably get a
4364			 * request for converting an extent that races
4365			 * with other threads already having converted
4366			 * part of it, since converting COW blocks back to
4367			 * regular blocks is not protected by the IOLOCK.
4370 ASSERT(flags & XFS_BMAPI_COWFORK);
4371 if (!(flags & XFS_BMAPI_COWFORK)) {
4376 if (eof || bno >= end)
4381 } else if (isnullstartblock(bma.got.br_startblock)) {
4386 * First, deal with the hole before the allocated space
4387 * that we found, if any.
4389 if ((need_alloc || wasdelay) &&
4390 !(flags & XFS_BMAPI_CONVERT_ONLY)) {
4392 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4393 bma.wasdel = wasdelay;
4398 * There's a 32/64 bit type mismatch between the
4399 * allocation length request (which can be 64 bits in
4400 * length) and the bma length request, which is
4401 * xfs_extlen_t and therefore 32 bits. Hence we have to
4402 * check for 32-bit overflows and handle them here.
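			 *
			 * For example (illustrative only): a request for 2^32
			 * blocks is clamped to MAXEXTLEN (2^21 - 1 blocks)
			 * here; the rest of the range is mapped by later
			 * iterations of the surrounding loop, subject to the
			 * *nmap limit.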
4404 if (len > (xfs_filblks_t)MAXEXTLEN)
4405 bma.length = MAXEXTLEN;
4410 ASSERT(bma.length > 0);
4411 error = xfs_bmapi_allocate(&bma);
4414 if (bma.blkno == NULLFSBLOCK)
4418 * If this is a CoW allocation, record the data in
4419 * the refcount btree for orphan recovery.
4421 if (whichfork == XFS_COW_FORK) {
4422 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4423 bma.blkno, bma.length);
4429 /* Deal with the allocated space we found. */
4430 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4433 /* Execute unwritten extent conversion if necessary */
4434 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4435 if (error == -EAGAIN)
4440 /* update the extent map to return */
4441 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4444 * If we're done, stop now. Stop when we've allocated
4445 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4446 * the transaction may get too big.
4448 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4451 /* Else go on to the next record. */
4453 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4459 * Transform from btree to extents, give it cur.
4461 if (xfs_bmap_wants_extents(ip, whichfork)) {
4462 int tmp_logflags = 0;
4465 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4466 &tmp_logflags, whichfork);
4467 bma.logflags |= tmp_logflags;
4472 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4473 XFS_IFORK_NEXTENTS(ip, whichfork) >
4474 XFS_IFORK_MAXEXT(ip, whichfork));
4478	 * Log everything. Do this after conversion; there's no point in
4479 * logging the extent records if we've converted to btree format.
4481 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4482 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4483 bma.logflags &= ~xfs_ilog_fext(whichfork);
4484 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4485 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4486 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4488	 * Log whatever the flags say, even on error. Otherwise we might miss
4489	 * detecting a case where the data was changed, an error occurred, and
4490	 * nothing was logged, so we don't shut down when we should.
4493 xfs_trans_log_inode(tp, ip, bma.logflags);
4497 ASSERT(*firstblock == NULLFSBLOCK ||
4498 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4500 bma.cur->bc_private.b.firstblock));
4501 *firstblock = bma.cur->bc_private.b.firstblock;
4503 xfs_btree_del_cursor(bma.cur,
4504 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4507 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4514 struct xfs_trans *tp,
4515 struct xfs_inode *ip,
4518 xfs_fsblock_t startblock,
4519 struct xfs_defer_ops *dfops)
4521 struct xfs_mount *mp = ip->i_mount;
4522 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4523 struct xfs_btree_cur *cur = NULL;
4524 xfs_fsblock_t firstblock = NULLFSBLOCK;
4525 struct xfs_bmbt_irec got;
4526 struct xfs_iext_cursor icur;
4527 int logflags = 0, error;
4530 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4531 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4533 if (unlikely(XFS_TEST_ERROR(
4534 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4535 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4536 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4537 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4538 return -EFSCORRUPTED;
4541 if (XFS_FORCED_SHUTDOWN(mp))
4544 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4545 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4550 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4551 /* make sure we only reflink into a hole. */
4552 ASSERT(got.br_startoff > bno);
4553 ASSERT(got.br_startoff - bno >= len);
4556 ip->i_d.di_nblocks += len;
4557 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4559 if (ifp->if_flags & XFS_IFBROOT) {
4560 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
4561 cur->bc_private.b.firstblock = firstblock;
4562 cur->bc_private.b.dfops = dfops;
4563 cur->bc_private.b.flags = 0;
4566 got.br_startoff = bno;
4567 got.br_startblock = startblock;
4568 got.br_blockcount = len;
4569 got.br_state = XFS_EXT_NORM;
4571 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &icur,
4572 &cur, &got, &firstblock, dfops, &logflags);
4576 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
4577 int tmp_logflags = 0;
4579 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4580 &tmp_logflags, XFS_DATA_FORK);
4581 logflags |= tmp_logflags;
4585 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4586 logflags &= ~XFS_ILOG_DEXT;
4587 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4588 logflags &= ~XFS_ILOG_DBROOT;
4591 xfs_trans_log_inode(tp, ip, logflags);
4593 xfs_btree_del_cursor(cur,
4594 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4600 * When a delalloc extent is split (e.g., due to a hole punch), the original
4601 * indlen reservation must be shared across the two new extents that are left behind.
4604 * Given the original reservation and the worst case indlen for the two new
4605 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4606 * reservation fairly across the two new extents. If necessary, steal available
4607 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4608 * ores == 1). The number of stolen blocks is returned. The availability and
4609 * subsequent accounting of stolen blocks is the responsibility of the caller.
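 *
 * A worked example of the logic below (illustrative numbers only): with
 * ores = 5, worst-case indlens of 4 and 4 (nres = 8) and avail = 1, one
 * block is stolen (ores becomes 6), resfactor works out to 75, each indlen
 * is scaled down to 3, and no remainder is left to hand back out.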
4611 static xfs_filblks_t
4612 xfs_bmap_split_indlen(
4613 xfs_filblks_t ores, /* original res. */
4614 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4615 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4616 xfs_filblks_t avail) /* stealable blocks */
4618 xfs_filblks_t len1 = *indlen1;
4619 xfs_filblks_t len2 = *indlen2;
4620 xfs_filblks_t nres = len1 + len2; /* new total res. */
4621 xfs_filblks_t stolen = 0;
4622 xfs_filblks_t resfactor;
4625 * Steal as many blocks as we can to try and satisfy the worst case
4626 * indlen for both new extents.
4628 if (ores < nres && avail)
4629 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4632 /* nothing else to do if we've satisfied the new reservation */
4637 * We can't meet the total required reservation for the two extents.
4638 * Calculate the percent of the overall shortage between both extents
4639 * and apply this percentage to each of the requested indlen values.
4640 * This distributes the shortage fairly and reduces the chances that one
4641 * of the two extents is left with nothing when extents are repeatedly
4644 resfactor = (ores * 100);
4645 do_div(resfactor, nres);
4650 ASSERT(len1 + len2 <= ores);
4651 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4654 * Hand out the remainder to each extent. If one of the two reservations
4655 * is zero, we want to make sure that one gets a block first. The loop
4656	 * below starts with len1, so hand len2 a block right off the bat if it is zero.
4659 ores -= (len1 + len2);
4660 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4661 if (ores && !len2 && *indlen2) {
4666 if (len1 < *indlen1) {
4672 if (len2 < *indlen2) {
4685 xfs_bmap_del_extent_delay(
4686 struct xfs_inode *ip,
4688 struct xfs_iext_cursor *icur,
4689 struct xfs_bmbt_irec *got,
4690 struct xfs_bmbt_irec *del)
4692 struct xfs_mount *mp = ip->i_mount;
4693 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4694 struct xfs_bmbt_irec new;
4695 int64_t da_old, da_new, da_diff = 0;
4696 xfs_fileoff_t del_endoff, got_endoff;
4697 xfs_filblks_t got_indlen, new_indlen, stolen;
4698 int state = xfs_bmap_fork_to_state(whichfork);
4702 XFS_STATS_INC(mp, xs_del_exlist);
4704 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4705 del_endoff = del->br_startoff + del->br_blockcount;
4706 got_endoff = got->br_startoff + got->br_blockcount;
4707 da_old = startblockval(got->br_startblock);
4710 ASSERT(del->br_blockcount > 0);
4711 ASSERT(got->br_startoff <= del->br_startoff);
4712 ASSERT(got_endoff >= del_endoff);
4715 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4717 do_div(rtexts, mp->m_sb.sb_rextsize);
4718 xfs_mod_frextents(mp, rtexts);
4722 * Update the inode delalloc counter now and wait to update the
4723 * sb counters as we might have to borrow some blocks for the
4724 * indirect block accounting.
4726 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4727 -((long)del->br_blockcount), 0,
4728 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4731 ip->i_delayed_blks -= del->br_blockcount;
4733 if (got->br_startoff == del->br_startoff)
4734 state |= BMAP_LEFT_FILLING;
4735 if (got_endoff == del_endoff)
4736 state |= BMAP_RIGHT_FILLING;
4738 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4739 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4741 * Matches the whole extent. Delete the entry.
4743 xfs_iext_remove(ip, icur, state);
4744 xfs_iext_prev(ifp, icur);
4746 case BMAP_LEFT_FILLING:
4748 * Deleting the first part of the extent.
4750 got->br_startoff = del_endoff;
4751 got->br_blockcount -= del->br_blockcount;
4752 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4753 got->br_blockcount), da_old);
4754 got->br_startblock = nullstartblock((int)da_new);
4755 xfs_iext_update_extent(ip, state, icur, got);
4757 case BMAP_RIGHT_FILLING:
4759 * Deleting the last part of the extent.
4761 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4762 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4763 got->br_blockcount), da_old);
4764 got->br_startblock = nullstartblock((int)da_new);
4765 xfs_iext_update_extent(ip, state, icur, got);
4769 * Deleting the middle of the extent.
4771 * Distribute the original indlen reservation across the two new
4772 * extents. Steal blocks from the deleted extent if necessary.
4773 * Stealing blocks simply fudges the fdblocks accounting below.
4774 * Warn if either of the new indlen reservations is zero as this
4775 * can lead to delalloc problems.
4777 got->br_blockcount = del->br_startoff - got->br_startoff;
4778 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4780 new.br_blockcount = got_endoff - del_endoff;
4781 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4783 WARN_ON_ONCE(!got_indlen || !new_indlen);
4784 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4785 del->br_blockcount);
4787 got->br_startblock = nullstartblock((int)got_indlen);
4789 new.br_startoff = del_endoff;
4790 new.br_state = got->br_state;
4791 new.br_startblock = nullstartblock((int)new_indlen);
4793 xfs_iext_update_extent(ip, state, icur, got);
4794 xfs_iext_next(ifp, icur);
4795 xfs_iext_insert(ip, icur, &new, state);
4797 da_new = got_indlen + new_indlen - stolen;
4798 del->br_blockcount -= stolen;
4802 ASSERT(da_old >= da_new);
4803 da_diff = da_old - da_new;
4805 da_diff += del->br_blockcount;
4807 xfs_mod_fdblocks(mp, da_diff, false);
4812 xfs_bmap_del_extent_cow(
4813 struct xfs_inode *ip,
4814 struct xfs_iext_cursor *icur,
4815 struct xfs_bmbt_irec *got,
4816 struct xfs_bmbt_irec *del)
4818 struct xfs_mount *mp = ip->i_mount;
4819 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4820 struct xfs_bmbt_irec new;
4821 xfs_fileoff_t del_endoff, got_endoff;
4822 int state = BMAP_COWFORK;
4824 XFS_STATS_INC(mp, xs_del_exlist);
4826 del_endoff = del->br_startoff + del->br_blockcount;
4827 got_endoff = got->br_startoff + got->br_blockcount;
4829 ASSERT(del->br_blockcount > 0);
4830 ASSERT(got->br_startoff <= del->br_startoff);
4831 ASSERT(got_endoff >= del_endoff);
4832 ASSERT(!isnullstartblock(got->br_startblock));
4834 if (got->br_startoff == del->br_startoff)
4835 state |= BMAP_LEFT_FILLING;
4836 if (got_endoff == del_endoff)
4837 state |= BMAP_RIGHT_FILLING;
4839 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4840 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4842 * Matches the whole extent. Delete the entry.
4844 xfs_iext_remove(ip, icur, state);
4845 xfs_iext_prev(ifp, icur);
4847 case BMAP_LEFT_FILLING:
4849 * Deleting the first part of the extent.
4851 got->br_startoff = del_endoff;
4852 got->br_blockcount -= del->br_blockcount;
4853 got->br_startblock = del->br_startblock + del->br_blockcount;
4854 xfs_iext_update_extent(ip, state, icur, got);
4856 case BMAP_RIGHT_FILLING:
4858 * Deleting the last part of the extent.
4860 got->br_blockcount -= del->br_blockcount;
4861 xfs_iext_update_extent(ip, state, icur, got);
4865 * Deleting the middle of the extent.
4867 got->br_blockcount = del->br_startoff - got->br_startoff;
4869 new.br_startoff = del_endoff;
4870 new.br_blockcount = got_endoff - del_endoff;
4871 new.br_state = got->br_state;
4872 new.br_startblock = del->br_startblock + del->br_blockcount;
4874 xfs_iext_update_extent(ip, state, icur, got);
4875 xfs_iext_next(ifp, icur);
4876 xfs_iext_insert(ip, icur, &new, state);
4879 ip->i_delayed_blks -= del->br_blockcount;
4883 * Called by xfs_bmapi to update file extent records and the btree
4884 * after removing space.
4886 STATIC int /* error */
4887 xfs_bmap_del_extent_real(
4888 xfs_inode_t *ip, /* incore inode pointer */
4889 xfs_trans_t *tp, /* current transaction pointer */
4890 struct xfs_iext_cursor *icur,
4891 struct xfs_defer_ops *dfops, /* list of extents to be freed */
4892 xfs_btree_cur_t *cur, /* if null, not a btree */
4893 xfs_bmbt_irec_t *del, /* data to remove from extents */
4894 int *logflagsp, /* inode logging flags */
4895 int whichfork, /* data or attr fork */
4896 int bflags) /* bmapi flags */
4898 xfs_fsblock_t del_endblock=0; /* first block past del */
4899 xfs_fileoff_t del_endoff; /* first offset past del */
4900 int do_fx; /* free extent at end of routine */
4901 int error; /* error return value */
4902	int			flags = 0;	/* inode logging flags */
4903 struct xfs_bmbt_irec got; /* current extent entry */
4904 xfs_fileoff_t got_endoff; /* first offset past got */
4905 int i; /* temp state */
4906 xfs_ifork_t *ifp; /* inode fork pointer */
4907 xfs_mount_t *mp; /* mount structure */
4908 xfs_filblks_t nblks; /* quota/sb block count */
4909 xfs_bmbt_irec_t new; /* new record to be inserted */
4911 uint qfield; /* quota field to update */
4912 int state = xfs_bmap_fork_to_state(whichfork);
4913 struct xfs_bmbt_irec old;
4916 XFS_STATS_INC(mp, xs_del_exlist);
4918 ifp = XFS_IFORK_PTR(ip, whichfork);
4919 ASSERT(del->br_blockcount > 0);
4920 xfs_iext_get_extent(ifp, icur, &got);
4921 ASSERT(got.br_startoff <= del->br_startoff);
4922 del_endoff = del->br_startoff + del->br_blockcount;
4923 got_endoff = got.br_startoff + got.br_blockcount;
4924 ASSERT(got_endoff >= del_endoff);
4925 ASSERT(!isnullstartblock(got.br_startblock));
4930	 * If the directory code is running with no block reservation, the
4931	 * deleted block is in the middle of its extent, and the resulting
4932	 * insert of an extent would cause a conversion to btree format, then
4933	 * reject the delete. The calling code will then swap blocks around
4934	 * instead. We have to do this now, rather than waiting for the
4935	 * conversion to btree format, since the transaction will be dirty then.
4937 if (tp->t_blk_res == 0 &&
4938 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4939 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4940 XFS_IFORK_MAXEXT(ip, whichfork) &&
4941 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4944 flags = XFS_ILOG_CORE;
4945 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4949 ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
4950 ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
4951 bno = del->br_startblock;
4952 len = del->br_blockcount;
4953 do_div(bno, mp->m_sb.sb_rextsize);
4954 do_div(len, mp->m_sb.sb_rextsize);
4955 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4959 nblks = len * mp->m_sb.sb_rextsize;
4960 qfield = XFS_TRANS_DQ_RTBCOUNT;
4963 nblks = del->br_blockcount;
4964 qfield = XFS_TRANS_DQ_BCOUNT;
4967 del_endblock = del->br_startblock + del->br_blockcount;
4969 error = xfs_bmbt_lookup_eq(cur, &got, &i);
4972 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4975 if (got.br_startoff == del->br_startoff)
4976 state |= BMAP_LEFT_FILLING;
4977 if (got_endoff == del_endoff)
4978 state |= BMAP_RIGHT_FILLING;
4980 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4981 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4983 * Matches the whole extent. Delete the entry.
4985 xfs_iext_remove(ip, icur, state);
4986 xfs_iext_prev(ifp, icur);
4987 XFS_IFORK_NEXT_SET(ip, whichfork,
4988 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4989 flags |= XFS_ILOG_CORE;
4991 flags |= xfs_ilog_fext(whichfork);
4994 if ((error = xfs_btree_delete(cur, &i)))
4996 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4998 case BMAP_LEFT_FILLING:
5000 * Deleting the first part of the extent.
5002 got.br_startoff = del_endoff;
5003 got.br_startblock = del_endblock;
5004 got.br_blockcount -= del->br_blockcount;
5005 xfs_iext_update_extent(ip, state, icur, &got);
5007 flags |= xfs_ilog_fext(whichfork);
5010 error = xfs_bmbt_update(cur, &got);
5014 case BMAP_RIGHT_FILLING:
5016 * Deleting the last part of the extent.
5018 got.br_blockcount -= del->br_blockcount;
5019 xfs_iext_update_extent(ip, state, icur, &got);
5021 flags |= xfs_ilog_fext(whichfork);
5024 error = xfs_bmbt_update(cur, &got);
5030 * Deleting the middle of the extent.
5034 got.br_blockcount = del->br_startoff - got.br_startoff;
5035 xfs_iext_update_extent(ip, state, icur, &got);
5037 new.br_startoff = del_endoff;
5038 new.br_blockcount = got_endoff - del_endoff;
5039 new.br_state = got.br_state;
5040 new.br_startblock = del_endblock;
5042 flags |= XFS_ILOG_CORE;
5044 error = xfs_bmbt_update(cur, &got);
5047 error = xfs_btree_increment(cur, 0, &i);
5050 cur->bc_rec.b = new;
5051 error = xfs_btree_insert(cur, &i);
5052 if (error && error != -ENOSPC)
5055			 * If we get no-space back from the btree insert, it tried a
5056			 * split and we have a zero block reservation. Fix up
5057			 * our state and return the error.
5059 if (error == -ENOSPC) {
5061 * Reset the cursor, don't trust it after any
5064 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5067 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5069 * Update the btree record back
5070 * to the original value.
5072 error = xfs_bmbt_update(cur, &old);
5076 * Reset the extent record back
5077 * to the original value.
5079 xfs_iext_update_extent(ip, state, icur, &old);
5084 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5086 flags |= xfs_ilog_fext(whichfork);
5087 XFS_IFORK_NEXT_SET(ip, whichfork,
5088 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5089 xfs_iext_next(ifp, icur);
5090 xfs_iext_insert(ip, icur, &new, state);
5094 /* remove reverse mapping */
5095 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5100 * If we need to, add to list of extents to delete.
5102 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5103 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5104 error = xfs_refcount_decrease_extent(mp, dfops, del);
5108 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5109 del->br_blockcount, NULL);
5113 * Adjust inode # blocks in the file.
5116 ip->i_d.di_nblocks -= nblks;
5118 * Adjust quota data.
5120 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5121 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5129 * Unmap (remove) blocks from a file.
5130 * If nexts is nonzero then the number of extents to remove is limited to
5131 * that value. If not all extents in the block range can be removed then
5136 xfs_trans_t *tp, /* transaction pointer */
5137 struct xfs_inode *ip, /* incore inode */
5138 xfs_fileoff_t start, /* first file offset deleted */
5139 xfs_filblks_t *rlen, /* i/o: amount remaining */
5140 int flags, /* misc flags */
5141 xfs_extnum_t nexts, /* number of extents max */
5142 xfs_fsblock_t *firstblock, /* first allocated block
5143 controls a.g. for allocs */
5144 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5146 xfs_btree_cur_t *cur; /* bmap btree cursor */
5147 xfs_bmbt_irec_t del; /* extent being deleted */
5148 int error; /* error return value */
5149 xfs_extnum_t extno; /* extent number in list */
5150 xfs_bmbt_irec_t got; /* current extent record */
5151 xfs_ifork_t *ifp; /* inode fork pointer */
5152 int isrt; /* freeing in rt area */
5153 int logflags; /* transaction logging flags */
5154 xfs_extlen_t mod; /* rt extent offset */
5155 xfs_mount_t *mp; /* mount structure */
5156 int tmp_logflags; /* partial logging flags */
5157 int wasdel; /* was a delayed alloc extent */
5158 int whichfork; /* data or attribute fork */
5160 xfs_filblks_t len = *rlen; /* length to unmap in file */
5161 xfs_fileoff_t max_len;
5162 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5164 struct xfs_iext_cursor icur;
5167 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5169 whichfork = xfs_bmapi_whichfork(flags);
5170 ASSERT(whichfork != XFS_COW_FORK);
5171 ifp = XFS_IFORK_PTR(ip, whichfork);
5173 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5174 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5175 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5177 return -EFSCORRUPTED;
5180 if (XFS_FORCED_SHUTDOWN(mp))
5183 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5188 * Guesstimate how many blocks we can unmap without running the risk of
5189 * blowing out the transaction with a mix of EFIs and reflink
5192 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5193 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5197 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5198 (error = xfs_iread_extents(tp, ip, whichfork)))
5200 if (xfs_iext_count(ifp) == 0) {
5204 XFS_STATS_INC(mp, xs_blk_unmap);
5205 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5208 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5215 if (ifp->if_flags & XFS_IFBROOT) {
5216 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5217 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5218 cur->bc_private.b.firstblock = *firstblock;
5219 cur->bc_private.b.dfops = dfops;
5220 cur->bc_private.b.flags = 0;
5226 * Synchronize by locking the bitmap inode.
5228 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5229 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5230 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5231 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5235 while (end != (xfs_fileoff_t)-1 && end >= start &&
5236 (nexts == 0 || extno < nexts) && max_len > 0) {
5238 * Is the found extent after a hole in which end lives?
5239 * Just back up to the previous extent, if so.
5241 if (got.br_startoff > end &&
5242 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5247 * Is the last block of this extent before the range
5248 * we're supposed to delete? If so, we're done.
5250 end = XFS_FILEOFF_MIN(end,
5251 got.br_startoff + got.br_blockcount - 1);
5255 * Then deal with the (possibly delayed) allocated space
5259 wasdel = isnullstartblock(del.br_startblock);
5262 * Make sure we don't touch multiple AGF headers out of order
5263 * in a single transaction, as that could cause AB-BA deadlocks.
5266 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5267 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5271 if (got.br_startoff < start) {
5272 del.br_startoff = start;
5273 del.br_blockcount -= start - got.br_startoff;
5275 del.br_startblock += start - got.br_startoff;
5277 if (del.br_startoff + del.br_blockcount > end + 1)
5278 del.br_blockcount = end + 1 - del.br_startoff;
5280 /* How much can we safely unmap? */
5281 if (max_len < del.br_blockcount) {
5282 del.br_startoff += del.br_blockcount - max_len;
5284 del.br_startblock += del.br_blockcount - max_len;
5285 del.br_blockcount = max_len;
5288 sum = del.br_startblock + del.br_blockcount;
5290 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5292 * Realtime extent not lined up at the end.
5293 * The extent could have been split into written
5294 * and unwritten pieces, or we could just be
5295 * unmapping part of it. But we can't really
5296 * get rid of part of a realtime extent.
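			 *
			 * An illustrative example (numbers are hypothetical):
			 * with a realtime extent size of 4 blocks, a deletion
			 * whose end falls at block 10 (10 % 4 == 2) is not
			 * aligned to a realtime extent boundary, so the
			 * unaligned tail is either skipped over or converted
			 * to unwritten below rather than freed.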
5298 if (del.br_state == XFS_EXT_UNWRITTEN ||
5299 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5301 * This piece is unwritten, or we're not
5302 * using unwritten extents. Skip over it.
5305 end -= mod > del.br_blockcount ?
5306 del.br_blockcount : mod;
5307 if (end < got.br_startoff &&
5308 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5315 * It's written, turn it unwritten.
5316 * This is better than zeroing it.
5318 ASSERT(del.br_state == XFS_EXT_NORM);
5319 ASSERT(tp->t_blk_res > 0);
5321 * If this spans a realtime extent boundary,
5322 * chop it back to the start of the one we end at.
5324 if (del.br_blockcount > mod) {
5325 del.br_startoff += del.br_blockcount - mod;
5326 del.br_startblock += del.br_blockcount - mod;
5327 del.br_blockcount = mod;
5329 del.br_state = XFS_EXT_UNWRITTEN;
5330 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5331 whichfork, &icur, &cur, &del,
5332 firstblock, dfops, &logflags);
5337 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5339 * Realtime extent is lined up at the end but not
5340			 * at the front. We'll get rid of full extents if we can.
5343 mod = mp->m_sb.sb_rextsize - mod;
5344 if (del.br_blockcount > mod) {
5345 del.br_blockcount -= mod;
5346 del.br_startoff += mod;
5347 del.br_startblock += mod;
5348 } else if ((del.br_startoff == start &&
5349 (del.br_state == XFS_EXT_UNWRITTEN ||
5350 tp->t_blk_res == 0)) ||
5351 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5353 * Can't make it unwritten. There isn't
5354 * a full extent here so just skip it.
5356 ASSERT(end >= del.br_blockcount);
5357 end -= del.br_blockcount;
5358 if (got.br_startoff > end &&
5359 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5364 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5365 struct xfs_bmbt_irec prev;
5368 * This one is already unwritten.
5369 * It must have a written left neighbor.
5370				 * Unwrite the killed part of that one and try again.
5373 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5375 ASSERT(prev.br_state == XFS_EXT_NORM);
5376 ASSERT(!isnullstartblock(prev.br_startblock));
5377 ASSERT(del.br_startblock ==
5378 prev.br_startblock + prev.br_blockcount);
5379 if (prev.br_startoff < start) {
5380 mod = start - prev.br_startoff;
5381 prev.br_blockcount -= mod;
5382 prev.br_startblock += mod;
5383 prev.br_startoff = start;
5385 prev.br_state = XFS_EXT_UNWRITTEN;
5386 error = xfs_bmap_add_extent_unwritten_real(tp,
5387 ip, whichfork, &icur, &cur,
5388 &prev, firstblock, dfops,
5394 ASSERT(del.br_state == XFS_EXT_NORM);
5395 del.br_state = XFS_EXT_UNWRITTEN;
5396 error = xfs_bmap_add_extent_unwritten_real(tp,
5397 ip, whichfork, &icur, &cur,
5398 &del, firstblock, dfops,
5407 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5410 error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
5411 cur, &del, &tmp_logflags, whichfork,
5413 logflags |= tmp_logflags;
5419 max_len -= del.br_blockcount;
5420 end = del.br_startoff - 1;
5423 * If not done go on to the next (previous) record.
5425 if (end != (xfs_fileoff_t)-1 && end >= start) {
5426 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5427 (got.br_startoff > end &&
5428 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5435 if (done || end == (xfs_fileoff_t)-1 || end < start)
5438 *rlen = end - start + 1;
5441 * Convert to a btree if necessary.
5443 if (xfs_bmap_needs_btree(ip, whichfork)) {
5444 ASSERT(cur == NULL);
5445 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5446 &cur, 0, &tmp_logflags, whichfork);
5447 logflags |= tmp_logflags;
5452 * transform from btree to extents, give it cur
5454 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5455 ASSERT(cur != NULL);
5456 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5458 logflags |= tmp_logflags;
5463 * transform from extents to local?
5468	 * Log everything. Do this after conversion; there's no point in
5469 * logging the extent records if we've converted to btree format.
5471 if ((logflags & xfs_ilog_fext(whichfork)) &&
5472 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5473 logflags &= ~xfs_ilog_fext(whichfork);
5474 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5475 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5476 logflags &= ~xfs_ilog_fbroot(whichfork);
5478	 * Log the inode even in the error case; if the transaction
5479	 * is dirty we'll need to shut down the filesystem.
5482 xfs_trans_log_inode(tp, ip, logflags);
5485 *firstblock = cur->bc_private.b.firstblock;
5486 cur->bc_private.b.allocated = 0;
5488 xfs_btree_del_cursor(cur,
5489 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5494 /* Unmap a range of a file. */
5498 struct xfs_inode *ip,
5503 xfs_fsblock_t *firstblock,
5504 struct xfs_defer_ops *dfops,
5509 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5516 * Determine whether an extent shift can be accomplished by a merge with the
5517 * extent that precedes the target hole of the shift.
5521 struct xfs_bmbt_irec *left, /* preceding extent */
5522 struct xfs_bmbt_irec *got, /* current extent to shift */
5523 xfs_fileoff_t shift) /* shift fsb */
5525 xfs_fileoff_t startoff;
5527 startoff = got->br_startoff - shift;
5530 * The extent, once shifted, must be adjacent in-file and on-disk with
5531 * the preceding extent.
5533 if ((left->br_startoff + left->br_blockcount != startoff) ||
5534 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5535 (left->br_state != got->br_state) ||
5536 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5543 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5544 * hole in the file. If an extent shift would result in the extent being fully
5545 * adjacent to the extent that currently precedes the hole, we can merge with
5546 * the preceding extent rather than do the shift.
5548 * This function assumes the caller has verified a shift-by-merge is possible
5549 * with the provided extents via xfs_bmse_can_merge().
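 *
 * For example (illustrative values only): with a preceding extent covering
 * file offsets 70-79 and the target extent at offsets 100-109 being shifted
 * left by 20 blocks, the shifted extent would start at offset 80; if the two
 * are also contiguous on disk and share the same state, they collapse into a
 * single 20-block extent starting at offset 70.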
5553 struct xfs_inode *ip,
5555 xfs_fileoff_t shift, /* shift fsb */
5556 struct xfs_iext_cursor *icur,
5557 struct xfs_bmbt_irec *got, /* extent to shift */
5558 struct xfs_bmbt_irec *left, /* preceding extent */
5559 struct xfs_btree_cur *cur,
5560 int *logflags, /* output */
5561 struct xfs_defer_ops *dfops)
5563 struct xfs_bmbt_irec new;
5564 xfs_filblks_t blockcount;
5566 struct xfs_mount *mp = ip->i_mount;
5568 blockcount = left->br_blockcount + got->br_blockcount;
5570 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5571 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5572 ASSERT(xfs_bmse_can_merge(left, got, shift));
5575 new.br_blockcount = blockcount;
5578 * Update the on-disk extent count, the btree if necessary and log the
5581 XFS_IFORK_NEXT_SET(ip, whichfork,
5582 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5583 *logflags |= XFS_ILOG_CORE;
5585 *logflags |= XFS_ILOG_DEXT;
5589 /* lookup and remove the extent to merge */
5590 error = xfs_bmbt_lookup_eq(cur, got, &i);
5593 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5595 error = xfs_btree_delete(cur, &i);
5598 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5600 /* lookup and update size of the previous extent */
5601 error = xfs_bmbt_lookup_eq(cur, left, &i);
5604 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5606 error = xfs_bmbt_update(cur, &new);
5611 xfs_iext_remove(ip, icur, 0);
5612 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5613 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5616 /* update reverse mapping. rmap functions merge the rmaps for us */
5617 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
5620 memcpy(&new, got, sizeof(new));
5621 new.br_startoff = left->br_startoff + left->br_blockcount;
5622 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
5626 xfs_bmap_shift_update_extent(
5627 struct xfs_inode *ip,
5629 struct xfs_iext_cursor *icur,
5630 struct xfs_bmbt_irec *got,
5631 struct xfs_btree_cur *cur,
5633 struct xfs_defer_ops *dfops,
5634 xfs_fileoff_t startoff)
5636 struct xfs_mount *mp = ip->i_mount;
5637 struct xfs_bmbt_irec prev = *got;
5640 *logflags |= XFS_ILOG_CORE;
5642 got->br_startoff = startoff;
5645 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5648 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5650 error = xfs_bmbt_update(cur, got);
5654 *logflags |= XFS_ILOG_DEXT;
5657 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5660 /* update reverse mapping */
5661 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
5664 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
5668 xfs_bmap_collapse_extents(
5669 struct xfs_trans *tp,
5670 struct xfs_inode *ip,
5671 xfs_fileoff_t *next_fsb,
5672 xfs_fileoff_t offset_shift_fsb,
5674 xfs_fsblock_t *firstblock,
5675 struct xfs_defer_ops *dfops)
5677 int whichfork = XFS_DATA_FORK;
5678 struct xfs_mount *mp = ip->i_mount;
5679 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5680 struct xfs_btree_cur *cur = NULL;
5681 struct xfs_bmbt_irec got, prev;
5682 struct xfs_iext_cursor icur;
5683 xfs_fileoff_t new_startoff;
5687 if (unlikely(XFS_TEST_ERROR(
5688 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5689 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5690 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5691 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5692 return -EFSCORRUPTED;
5695 if (XFS_FORCED_SHUTDOWN(mp))
5698 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5700 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5701 error = xfs_iread_extents(tp, ip, whichfork);
5706 if (ifp->if_flags & XFS_IFBROOT) {
5707 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5708 cur->bc_private.b.firstblock = *firstblock;
5709 cur->bc_private.b.dfops = dfops;
5710 cur->bc_private.b.flags = 0;
5713 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5717 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5720 new_startoff = got.br_startoff - offset_shift_fsb;
5721 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5722 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5727 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5728 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5729 &icur, &got, &prev, cur, &logflags,
5736 if (got.br_startoff < offset_shift_fsb) {
5742 error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
5743 &logflags, dfops, new_startoff);
5748 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5753 *next_fsb = got.br_startoff;
5756 xfs_btree_del_cursor(cur,
5757 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5759 xfs_trans_log_inode(tp, ip, logflags);
5764 xfs_bmap_insert_extents(
5765 struct xfs_trans *tp,
5766 struct xfs_inode *ip,
5767 xfs_fileoff_t *next_fsb,
5768 xfs_fileoff_t offset_shift_fsb,
5770 xfs_fileoff_t stop_fsb,
5771 xfs_fsblock_t *firstblock,
5772 struct xfs_defer_ops *dfops)
5774 int whichfork = XFS_DATA_FORK;
5775 struct xfs_mount *mp = ip->i_mount;
5776 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5777 struct xfs_btree_cur *cur = NULL;
5778 struct xfs_bmbt_irec got, next;
5779 struct xfs_iext_cursor icur;
5780 xfs_fileoff_t new_startoff;
5784 if (unlikely(XFS_TEST_ERROR(
5785 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5786 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5787 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5788 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5789 return -EFSCORRUPTED;
5792 if (XFS_FORCED_SHUTDOWN(mp))
5795 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5797 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5798 error = xfs_iread_extents(tp, ip, whichfork);
5803 if (ifp->if_flags & XFS_IFBROOT) {
5804 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5805 cur->bc_private.b.firstblock = *firstblock;
5806 cur->bc_private.b.dfops = dfops;
5807 cur->bc_private.b.flags = 0;
5810 if (*next_fsb == NULLFSBLOCK) {
5811 xfs_iext_last(ifp, &icur);
5812 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5813 stop_fsb > got.br_startoff) {
5818 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5823 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5826 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
5831 new_startoff = got.br_startoff + offset_shift_fsb;
5832 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5833 if (new_startoff + got.br_blockcount > next.br_startoff) {
5839 * Unlike a left shift (which involves a hole punch), a right
5840 * shift does not modify extent neighbors in any way. We should
5841		 * never find mergeable extents in this scenario. Check anyway
5842 * and warn if we encounter two extents that could be one.
5844 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5848 error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
5849 &logflags, dfops, new_startoff);
5853 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5854 stop_fsb >= got.br_startoff + got.br_blockcount) {
5859 *next_fsb = got.br_startoff;
5862 xfs_btree_del_cursor(cur,
5863 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5865 xfs_trans_log_inode(tp, ip, logflags);
5870 * Split an extent into two extents at split_fsb such that split_fsb becomes
5871 * the first block of the new extent. @split_fsb is the block at which the
5872 * extent is split. If split_fsb lies in a hole or at the first block of an
5873 * extent, just return 0.
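 *
 * For example (illustrative values only): splitting an extent covering file
 * offsets 10-19 at split_fsb = 14 shrinks the original extent to offsets
 * 10-13 and inserts a new extent covering offsets 14-19, with the start
 * block adjusted accordingly.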
5876 xfs_bmap_split_extent_at(
5877 struct xfs_trans *tp,
5878 struct xfs_inode *ip,
5879 xfs_fileoff_t split_fsb,
5880 xfs_fsblock_t *firstfsb,
5881 struct xfs_defer_ops *dfops)
5883 int whichfork = XFS_DATA_FORK;
5884 struct xfs_btree_cur *cur = NULL;
5885 struct xfs_bmbt_irec got;
5886 struct xfs_bmbt_irec new; /* split extent */
5887 struct xfs_mount *mp = ip->i_mount;
5888 struct xfs_ifork *ifp;
5889 xfs_fsblock_t gotblkcnt; /* new block count for got */
5890 struct xfs_iext_cursor icur;
5895 if (unlikely(XFS_TEST_ERROR(
5896 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5897 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5898 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5899 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5900 XFS_ERRLEVEL_LOW, mp);
5901 return -EFSCORRUPTED;
5904 if (XFS_FORCED_SHUTDOWN(mp))
5907 ifp = XFS_IFORK_PTR(ip, whichfork);
5908 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5909 /* Read in all the extents */
5910 error = xfs_iread_extents(tp, ip, whichfork);
5916	 * If there are no extents, or split_fsb lies in a hole, we are done.
5918 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5919 got.br_startoff >= split_fsb)
5922 gotblkcnt = split_fsb - got.br_startoff;
5923 new.br_startoff = split_fsb;
5924 new.br_startblock = got.br_startblock + gotblkcnt;
5925 new.br_blockcount = got.br_blockcount - gotblkcnt;
5926 new.br_state = got.br_state;
5928 if (ifp->if_flags & XFS_IFBROOT) {
5929 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5930 cur->bc_private.b.firstblock = *firstfsb;
5931 cur->bc_private.b.dfops = dfops;
5932 cur->bc_private.b.flags = 0;
5933 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5936 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5939 got.br_blockcount = gotblkcnt;
5940 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5943 logflags = XFS_ILOG_CORE;
5945 error = xfs_bmbt_update(cur, &got);
5949 logflags |= XFS_ILOG_DEXT;
5951 /* Add new extent */
5952 xfs_iext_next(ifp, &icur);
5953 xfs_iext_insert(ip, &icur, &new, 0);
5954 XFS_IFORK_NEXT_SET(ip, whichfork,
5955 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5958 error = xfs_bmbt_lookup_eq(cur, &new, &i);
5961 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5962 error = xfs_btree_insert(cur, &i);
5965 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5969 * Convert to a btree if necessary.
5971 if (xfs_bmap_needs_btree(ip, whichfork)) {
5972 int tmp_logflags; /* partial log flag return val */
5974 ASSERT(cur == NULL);
5975 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
5976 &cur, 0, &tmp_logflags, whichfork);
5977 logflags |= tmp_logflags;
5982 cur->bc_private.b.allocated = 0;
5983 xfs_btree_del_cursor(cur,
5984 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5988 xfs_trans_log_inode(tp, ip, logflags);
5993 xfs_bmap_split_extent(
5994 struct xfs_inode *ip,
5995 xfs_fileoff_t split_fsb)
5997 struct xfs_mount *mp = ip->i_mount;
5998 struct xfs_trans *tp;
5999 struct xfs_defer_ops dfops;
6000 xfs_fsblock_t firstfsb;
6003 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6004 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6008 xfs_ilock(ip, XFS_ILOCK_EXCL);
6009 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6011 xfs_defer_init(&dfops, &firstfsb);
6013 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6018 error = xfs_defer_finish(&tp, &dfops);
6022 return xfs_trans_commit(tp);
6025 xfs_defer_cancel(&dfops);
6026 xfs_trans_cancel(tp);
6030 /* Deferred mapping is only for real extents in the data fork. */
6032 xfs_bmap_is_update_needed(
6033 struct xfs_bmbt_irec *bmap)
6035 return bmap->br_startblock != HOLESTARTBLOCK &&
6036 bmap->br_startblock != DELAYSTARTBLOCK;
6039 /* Record a bmap intent. */
6042 struct xfs_mount *mp,
6043 struct xfs_defer_ops *dfops,
6044 enum xfs_bmap_intent_type type,
6045 struct xfs_inode *ip,
6047 struct xfs_bmbt_irec *bmap)
6050 struct xfs_bmap_intent *bi;
6052 trace_xfs_bmap_defer(mp,
6053 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6055 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6056 ip->i_ino, whichfork,
6058 bmap->br_blockcount,
6061 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6062 INIT_LIST_HEAD(&bi->bi_list);
6065 bi->bi_whichfork = whichfork;
6066 bi->bi_bmap = *bmap;
6068 error = xfs_defer_ijoin(dfops, bi->bi_owner);
6074 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6078 /* Map an extent into a file. */
6080 xfs_bmap_map_extent(
6081 struct xfs_mount *mp,
6082 struct xfs_defer_ops *dfops,
6083 struct xfs_inode *ip,
6084 struct xfs_bmbt_irec *PREV)
6086 if (!xfs_bmap_is_update_needed(PREV))
6089 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6090 XFS_DATA_FORK, PREV);
6093 /* Unmap an extent out of a file. */
6095 xfs_bmap_unmap_extent(
6096 struct xfs_mount *mp,
6097 struct xfs_defer_ops *dfops,
6098 struct xfs_inode *ip,
6099 struct xfs_bmbt_irec *PREV)
6101 if (!xfs_bmap_is_update_needed(PREV))
6104 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6105 XFS_DATA_FORK, PREV);
6109 * Process one of the deferred bmap operations. We pass back the
6110 * btree cursor to maintain our lock on the bmapbt between calls.
6113 xfs_bmap_finish_one(
6114 struct xfs_trans *tp,
6115 struct xfs_defer_ops *dfops,
6116 struct xfs_inode *ip,
6117 enum xfs_bmap_intent_type type,
6119 xfs_fileoff_t startoff,
6120 xfs_fsblock_t startblock,
6121 xfs_filblks_t *blockcount,
6124 xfs_fsblock_t firstfsb;
6128 * firstfsb is tied to the transaction lifetime and is used to
6129 * ensure correct AG locking order and schedule work item
6130 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6131 * to only making one bmap call per transaction, so it should
6132 * be safe to have it as a local variable here.
6134 firstfsb = NULLFSBLOCK;
6136 trace_xfs_bmap_deferred(tp->t_mountp,
6137 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6138 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6139 ip->i_ino, whichfork, startoff, *blockcount, state);
6141 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6142 return -EFSCORRUPTED;
6144 if (XFS_TEST_ERROR(false, tp->t_mountp,
6145 XFS_ERRTAG_BMAP_FINISH_ONE))
6150 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6154 case XFS_BMAP_UNMAP:
6155 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6156 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6160 error = -EFSCORRUPTED;
6166 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6168 xfs_bmap_validate_extent(
6169 struct xfs_inode *ip,
6171 struct xfs_bmbt_irec *irec)
6173 struct xfs_mount *mp = ip->i_mount;
6174 xfs_fsblock_t endfsb;
6177 isrt = XFS_IS_REALTIME_INODE(ip);
6178 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6180 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6181 return __this_address;
6182 if (!xfs_verify_rtbno(mp, endfsb))
6183 return __this_address;
6185 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6186 return __this_address;
6187 if (!xfs_verify_fsbno(mp, endfsb))
6188 return __this_address;
6189 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6190 XFS_FSB_TO_AGNO(mp, endfsb))
6191 return __this_address;
6193 if (irec->br_state != XFS_EXT_NORM) {
6194 if (whichfork != XFS_DATA_FORK)
6195 return __this_address;
6196 if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
6197 return __this_address;