// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"

kmem_zone_t		*xfs_bmap_free_item_zone;
/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
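/*
 * Worked example of the loop above, using illustrative (hypothetical)
 * geometry rather than any particular filesystem: assume
 * maxleafents = 2^31 - 1 (MAXEXTNUM), minleafrecs = minnoderecs = 125
 * and maxrootrecs = 9.  The leaf level then needs
 * ceil(2147483647 / 125) = 17179870 blocks; successive node levels need
 * 137439, then 1100, then 9 blocks.  Nine records fit in the root, so
 * the loop stops with level = 5 and m_bm_maxlevels[whichfork] = 5.
 */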
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
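/*
 * Worked example, again with illustrative numbers: for a delayed extent
 * of len = 16384 blocks, with m_bmap_dmxr[0] = m_bmap_dmxr[1] = 125 and
 * five bmap btree levels, the loop charges ceil(16384/125) = 132 leaf
 * blocks, then ceil(132/125) = 2 node blocks, then 1 block; once len
 * reaches 1 it adds one block for each remaining level, giving a worst
 * case of 132 + 2 + 1 + (5 - 2 - 1) = 137 indirect blocks.
 */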
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
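/*
 * Illustrative arithmetic (not tied to a specific inode version): a
 * 256 byte inode places the attr fork so that only the minimal
 * XFS_BMDR_SPACE_CALC(MINABTPTRS) bytes remain for it, while larger
 * inodes leave room for six times that much data fork space.  Callers
 * store the result in di_forkoff in units of 8 bytes, i.e. as
 * xfs_default_attroffset(ip) >> 3, as the helper below does.
 */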
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur,ip,whichfork)		do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
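/*
 * Typical usage (a sketch mirroring xfs_bmap_btree_to_extents() below):
 * free one bmbt block owned by the inode's bmap btree, with rmap owner
 * information attached so the reverse mapping can be updated too:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, cbno, 1, &oinfo);
 *
 * The extent is not freed immediately; it is queued as a deferred free
 * operation that is processed at transaction commit.
 */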
/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise convert it */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error;		/* error return value */
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * implementation.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/* Set an inode attr fork off based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
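/*
 * Note on units (worked example): di_forkoff counts 8 byte words, not
 * bytes.  For a DEV format inode, roundup(sizeof(xfs_dev_t), 8) is 8
 * bytes, so the attr fork starts 8 bytes into the literal area and
 * di_forkoff is set to 8 >> 3 = 1.
 */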
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	if (unlikely(level == 0)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
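/*
 * Worked example: with (hypothetical) fork extents covering file offsets
 * [0, 10) and [15, 40), a search with len = 4 and *first_unused initially
 * 0 walks past the first extent (max becomes 10), then sees that the hole
 * [10, 15) before the second extent is large enough and stops, returning
 * *first_unused = 10.
 */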
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
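/*
 * Example of the check above: if the last extent maps file offsets
 * [100, 150), an allocation at offset 150 or beyond sets bma->aeof; an
 * allocation at offset 120 only sets it when that last extent is still
 * a delayed allocation (isnullstartblock), since the new real blocks
 * will land inside it.
 */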
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
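/*
 * Example of the state machine above: when the new real extent exactly
 * covers the old delalloc record and abuts real extents on both sides,
 * state is BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_FILLING |
 * BMAP_RIGHT_CONTIG, and three incore records (left, prev, right) merge
 * into one.  A CONTIG bit without the matching FILLING bit is impossible:
 * new can only border a neighbor if it reaches the corresponding edge of
 * the delalloc record it is carved from.
 */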
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2134 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2135 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2137 * Setting all of a previous oldext extent to newext.
2138 * The left and right neighbors are both contiguous with new.
2140 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2142 xfs_iext_remove(ip, icur, state);
2143 xfs_iext_remove(ip, icur, state);
2144 xfs_iext_prev(ifp, icur);
2145 xfs_iext_update_extent(ip, state, icur, &LEFT);
2146 XFS_IFORK_NEXT_SET(ip, whichfork,
2147 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2149 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2151 rval = XFS_ILOG_CORE;
2152 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2155 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2156 if ((error = xfs_btree_delete(cur, &i)))
2158 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2159 if ((error = xfs_btree_decrement(cur, 0, &i)))
2161 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2162 if ((error = xfs_btree_delete(cur, &i)))
2164 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2165 if ((error = xfs_btree_decrement(cur, 0, &i)))
2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2168 error = xfs_bmbt_update(cur, &LEFT);
2174 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2176 * Setting all of a previous oldext extent to newext.
2177 * The left neighbor is contiguous, the right is not.
2179 LEFT.br_blockcount += PREV.br_blockcount;
2181 xfs_iext_remove(ip, icur, state);
2182 xfs_iext_prev(ifp, icur);
2183 xfs_iext_update_extent(ip, state, icur, &LEFT);
2184 XFS_IFORK_NEXT_SET(ip, whichfork,
2185 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2187 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2189 rval = XFS_ILOG_CORE;
2190 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2193 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2194 if ((error = xfs_btree_delete(cur, &i)))
2196 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2197 if ((error = xfs_btree_decrement(cur, 0, &i)))
2199 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2200 error = xfs_bmbt_update(cur, &LEFT);
2206 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2208 * Setting all of a previous oldext extent to newext.
2209 * The right neighbor is contiguous, the left is not.
2211 PREV.br_blockcount += RIGHT.br_blockcount;
2212 PREV.br_state = new->br_state;
2214 xfs_iext_next(ifp, icur);
2215 xfs_iext_remove(ip, icur, state);
2216 xfs_iext_prev(ifp, icur);
2217 xfs_iext_update_extent(ip, state, icur, &PREV);
2219 XFS_IFORK_NEXT_SET(ip, whichfork,
2220 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2222 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2224 rval = XFS_ILOG_CORE;
2225 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2228 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2229 if ((error = xfs_btree_delete(cur, &i)))
2231 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2232 if ((error = xfs_btree_decrement(cur, 0, &i)))
2234 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2235 error = xfs_bmbt_update(cur, &PREV);
2241 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2243 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
2247 PREV.br_state = new->br_state;
2248 xfs_iext_update_extent(ip, state, icur, &PREV);
2251 rval = XFS_ILOG_DEXT;
2254 error = xfs_bmbt_lookup_eq(cur, new, &i);
2257 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2258 error = xfs_bmbt_update(cur, &PREV);
2264 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2266 * Setting the first part of a previous oldext extent to newext.
2267 * The left neighbor is contiguous.
2269 LEFT.br_blockcount += new->br_blockcount;
2272 PREV.br_startoff += new->br_blockcount;
2273 PREV.br_startblock += new->br_blockcount;
2274 PREV.br_blockcount -= new->br_blockcount;
2276 xfs_iext_update_extent(ip, state, icur, &PREV);
2277 xfs_iext_prev(ifp, icur);
2278 xfs_iext_update_extent(ip, state, icur, &LEFT);
2281 rval = XFS_ILOG_DEXT;
2284 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2287 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2288 error = xfs_bmbt_update(cur, &PREV);
2291 error = xfs_btree_decrement(cur, 0, &i);
2294 error = xfs_bmbt_update(cur, &LEFT);
2300 case BMAP_LEFT_FILLING:
2302 * Setting the first part of a previous oldext extent to newext.
2303 * The left neighbor is not contiguous.
2306 PREV.br_startoff += new->br_blockcount;
2307 PREV.br_startblock += new->br_blockcount;
2308 PREV.br_blockcount -= new->br_blockcount;
2310 xfs_iext_update_extent(ip, state, icur, &PREV);
2311 xfs_iext_insert(ip, icur, new, state);
2312 XFS_IFORK_NEXT_SET(ip, whichfork,
2313 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2315 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2317 rval = XFS_ILOG_CORE;
2318 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2321 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2322 error = xfs_bmbt_update(cur, &PREV);
2325 cur->bc_rec.b = *new;
2326 if ((error = xfs_btree_insert(cur, &i)))
2328 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2332 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2334 * Setting the last part of a previous oldext extent to newext.
2335 * The right neighbor is contiguous with the new allocation.
2338 PREV.br_blockcount -= new->br_blockcount;
2340 RIGHT.br_startoff = new->br_startoff;
2341 RIGHT.br_startblock = new->br_startblock;
2342 RIGHT.br_blockcount += new->br_blockcount;
2344 xfs_iext_update_extent(ip, state, icur, &PREV);
2345 xfs_iext_next(ifp, icur);
2346 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2349 rval = XFS_ILOG_DEXT;
2352 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2355 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2356 error = xfs_bmbt_update(cur, &PREV);
2359 error = xfs_btree_increment(cur, 0, &i);
2362 error = xfs_bmbt_update(cur, &RIGHT);
2368 case BMAP_RIGHT_FILLING:
2370 * Setting the last part of a previous oldext extent to newext.
2371 * The right neighbor is not contiguous.
2374 PREV.br_blockcount -= new->br_blockcount;
2376 xfs_iext_update_extent(ip, state, icur, &PREV);
2377 xfs_iext_next(ifp, icur);
2378 xfs_iext_insert(ip, icur, new, state);
2380 XFS_IFORK_NEXT_SET(ip, whichfork,
2381 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2383 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2385 rval = XFS_ILOG_CORE;
2386 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2389 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2390 error = xfs_bmbt_update(cur, &PREV);
2393 error = xfs_bmbt_lookup_eq(cur, new, &i);
2396 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2397 if ((error = xfs_btree_insert(cur, &i)))
2399 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2405 * Setting the middle part of a previous oldext extent to
2406 * newext. Contiguity is impossible here.
2407 * One extent becomes three extents.
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
2413 r[1].br_startoff = new_endoff;
2414 r[1].br_blockcount =
2415 old.br_startoff + old.br_blockcount - new_endoff;
2416 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2417 r[1].br_state = PREV.br_state;
2419 xfs_iext_update_extent(ip, state, icur, &PREV);
2420 xfs_iext_next(ifp, icur);
2421 xfs_iext_insert(ip, icur, &r[1], state);
2422 xfs_iext_insert(ip, icur, &r[0], state);
2424 XFS_IFORK_NEXT_SET(ip, whichfork,
2425 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2427 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2429 rval = XFS_ILOG_CORE;
2430 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2433 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2434 /* new right extent - oldext */
2435 error = xfs_bmbt_update(cur, &r[1]);
2438 /* new left extent - oldext */
2439 cur->bc_rec.b = PREV;
2440 if ((error = xfs_btree_insert(cur, &i)))
2442 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2444 * Reset the cursor to the position of the new extent
2445 * we are about to insert as we can't trust it after
2446 * the previous insert.
2448 error = xfs_bmbt_lookup_eq(cur, new, &i);
2451 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2452 /* new middle extent - newext */
2453 if ((error = xfs_btree_insert(cur, &i)))
2455 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
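		/*
		 * Illustration of this case (o = oldext, n = newext):
		 *
		 *	before:	PREV = [oooooooooooo]
		 *	after:	PREV = [ooo], r[0] = [nnn], r[1] = [oooo]
		 *
		 * PREV is trimmed to end where the new extent starts, r[0]
		 * carries *new, and r[1] covers new_endoff to the old end
		 * of PREV, so the extent count grows by two.
		 */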
2459 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2460 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2461 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2462 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2463 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2464 case BMAP_LEFT_CONTIG:
2465 case BMAP_RIGHT_CONTIG:
2467 * These cases are all impossible.
2472 /* update reverse mappings */
2473 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2475 /* convert to a btree if necessary */
2476 if (xfs_bmap_needs_btree(ip, whichfork)) {
2477 int tmp_logflags; /* partial log flag return val */
2479 ASSERT(cur == NULL);
2480 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2481 &tmp_logflags, whichfork);
2482 *logflagsp |= tmp_logflags;
2487 /* clear out the allocated field, done with it now in any case. */
2489 cur->bc_private.b.allocated = 0;
2493 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2503 * Convert a hole to a delayed allocation.
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
2512 struct xfs_ifork *ifp; /* inode fork pointer */
2513 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2514 xfs_filblks_t newlen=0; /* new indirect size */
2515 xfs_filblks_t oldlen=0; /* old indirect size */
2516 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2517 int state = xfs_bmap_fork_to_state(whichfork);
2518 xfs_filblks_t temp; /* temp for indirect calculations */
2520 ifp = XFS_IFORK_PTR(ip, whichfork);
2521 ASSERT(isnullstartblock(new->br_startblock));
2524 * Check and set flags if this segment has a left neighbor
2526 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2527 state |= BMAP_LEFT_VALID;
2528 if (isnullstartblock(left.br_startblock))
2529 state |= BMAP_LEFT_DELAY;
2533 * Check and set flags if the current (right) segment exists.
2534 * If it doesn't exist, we're converting the hole at end-of-file.
2536 if (xfs_iext_get_extent(ifp, icur, &right)) {
2537 state |= BMAP_RIGHT_VALID;
2538 if (isnullstartblock(right.br_startblock))
2539 state |= BMAP_RIGHT_DELAY;
2543 * Set contiguity flags on the left and right neighbors.
2544 * Don't let extents get too large, even if the pieces are contiguous.
2546 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2547 left.br_startoff + left.br_blockcount == new->br_startoff &&
2548 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2549 state |= BMAP_LEFT_CONTIG;
2551 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2552 new->br_startoff + new->br_blockcount == right.br_startoff &&
2553 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2554 (!(state & BMAP_LEFT_CONTIG) ||
2555 (left.br_blockcount + new->br_blockcount +
2556 right.br_blockcount <= MAXEXTLEN)))
2557 state |= BMAP_RIGHT_CONTIG;
2560 * Switch out based on the contiguity flags.
2562 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2563 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2565 * New allocation is contiguous with delayed allocations
2566 * on the left and on the right.
2567 * Merge all three into a single extent record.
2569 temp = left.br_blockcount + new->br_blockcount +
2570 right.br_blockcount;
2572 oldlen = startblockval(left.br_startblock) +
2573 startblockval(new->br_startblock) +
2574 startblockval(right.br_startblock);
2575 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2577 left.br_startblock = nullstartblock(newlen);
2578 left.br_blockcount = temp;
2580 xfs_iext_remove(ip, icur, state);
2581 xfs_iext_prev(ifp, icur);
2582 xfs_iext_update_extent(ip, state, icur, &left);
2585 case BMAP_LEFT_CONTIG:
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
2589 * Merge the new allocation with the left neighbor.
2591 temp = left.br_blockcount + new->br_blockcount;
2593 oldlen = startblockval(left.br_startblock) +
2594 startblockval(new->br_startblock);
2595 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2597 left.br_blockcount = temp;
2598 left.br_startblock = nullstartblock(newlen);
2600 xfs_iext_prev(ifp, icur);
2601 xfs_iext_update_extent(ip, state, icur, &left);
2604 case BMAP_RIGHT_CONTIG:
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
2608 * Merge the new allocation with the right neighbor.
2610 temp = new->br_blockcount + right.br_blockcount;
2611 oldlen = startblockval(new->br_startblock) +
2612 startblockval(right.br_startblock);
2613 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2615 right.br_startoff = new->br_startoff;
2616 right.br_startblock = nullstartblock(newlen);
2617 right.br_blockcount = temp;
2618 xfs_iext_update_extent(ip, state, icur, &right);
2623 * New allocation is not contiguous with another
2624 * delayed allocation.
2625 * Insert a new entry.
2627 oldlen = newlen = 0;
2628 xfs_iext_insert(ip, icur, new, state);
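	/*
	 * Merging can only ever reduce the worst-case indirect block
	 * reservation, since one large extent never needs more bmbt blocks
	 * than the separate pieces did; the surplus (oldlen - newlen) is
	 * handed back to the free-block counter below.
	 */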
2631 if (oldlen != newlen) {
2632 ASSERT(oldlen > newlen);
2633 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2636 * Nothing to do for disk quota accounting here.
2638 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
2643 * Convert a hole to a real allocation.
2645 STATIC int /* error */
2646 xfs_bmap_add_extent_hole_real(
2647 struct xfs_trans *tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
2651 struct xfs_btree_cur **curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	int			flags)
{
2656 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2657 struct xfs_mount *mp = ip->i_mount;
2658 struct xfs_btree_cur *cur = *curp;
2659 int error; /* error return value */
2660 int i; /* temp state */
2661 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2662 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2663 int rval=0; /* return value (logging flags) */
2664 int state = xfs_bmap_fork_to_state(whichfork);
2665 struct xfs_bmbt_irec old;
2667 ASSERT(!isnullstartblock(new->br_startblock));
2668 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2670 XFS_STATS_INC(mp, xs_add_exlist);
2673 * Check and set flags if this segment has a left neighbor.
2675 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2676 state |= BMAP_LEFT_VALID;
2677 if (isnullstartblock(left.br_startblock))
2678 state |= BMAP_LEFT_DELAY;
2682 * Check and set flags if this segment has a current value.
2683 * Not true if we're inserting into the "hole" at eof.
2685 if (xfs_iext_get_extent(ifp, icur, &right)) {
2686 state |= BMAP_RIGHT_VALID;
2687 if (isnullstartblock(right.br_startblock))
2688 state |= BMAP_RIGHT_DELAY;
2692 * We're inserting a real allocation between "left" and "right".
2693 * Set the contiguity flags. Don't let extents get too large.
2695 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2696 left.br_startoff + left.br_blockcount == new->br_startoff &&
2697 left.br_startblock + left.br_blockcount == new->br_startblock &&
2698 left.br_state == new->br_state &&
2699 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2700 state |= BMAP_LEFT_CONTIG;
2702 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2703 new->br_startoff + new->br_blockcount == right.br_startoff &&
2704 new->br_startblock + new->br_blockcount == right.br_startblock &&
2705 new->br_state == right.br_state &&
2706 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2707 (!(state & BMAP_LEFT_CONTIG) ||
2708 left.br_blockcount + new->br_blockcount +
2709 right.br_blockcount <= MAXEXTLEN))
2710 state |= BMAP_RIGHT_CONTIG;
2714 * Select which case we're in here, and implement it.
2716 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2717 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2719 * New allocation is contiguous with real allocations on the
2720 * left and on the right.
2721 * Merge all three into a single extent record.
2723 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2725 xfs_iext_remove(ip, icur, state);
2726 xfs_iext_prev(ifp, icur);
2727 xfs_iext_update_extent(ip, state, icur, &left);
2729 XFS_IFORK_NEXT_SET(ip, whichfork,
2730 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2732 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2734 rval = XFS_ILOG_CORE;
2735 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2738 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2739 error = xfs_btree_delete(cur, &i);
2742 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2743 error = xfs_btree_decrement(cur, 0, &i);
2746 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2747 error = xfs_bmbt_update(cur, &left);
2753 case BMAP_LEFT_CONTIG:
		 * New allocation is contiguous with a real allocation
		 * on the left.
2757 * Merge the new allocation with the left neighbor.
2760 left.br_blockcount += new->br_blockcount;
2762 xfs_iext_prev(ifp, icur);
2763 xfs_iext_update_extent(ip, state, icur, &left);
2766 rval = xfs_ilog_fext(whichfork);
2769 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2772 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2773 error = xfs_bmbt_update(cur, &left);
2779 case BMAP_RIGHT_CONTIG:
		 * New allocation is contiguous with a real allocation
		 * on the right.
2783 * Merge the new allocation with the right neighbor.
2787 right.br_startoff = new->br_startoff;
2788 right.br_startblock = new->br_startblock;
2789 right.br_blockcount += new->br_blockcount;
2790 xfs_iext_update_extent(ip, state, icur, &right);
2793 rval = xfs_ilog_fext(whichfork);
2796 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2799 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2800 error = xfs_bmbt_update(cur, &right);
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
2812 xfs_iext_insert(ip, icur, new, state);
2813 XFS_IFORK_NEXT_SET(ip, whichfork,
2814 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2816 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2818 rval = XFS_ILOG_CORE;
2819 error = xfs_bmbt_lookup_eq(cur, new, &i);
2822 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2823 error = xfs_btree_insert(cur, &i);
2826 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2831 /* add reverse mapping unless caller opted out */
2832 if (!(flags & XFS_BMAPI_NORMAP))
2833 xfs_rmap_map_extent(tp, ip, whichfork, new);
2835 /* convert to a btree if necessary */
2836 if (xfs_bmap_needs_btree(ip, whichfork)) {
2837 int tmp_logflags; /* partial log flag return val */
2839 ASSERT(cur == NULL);
2840 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2841 &tmp_logflags, whichfork);
2842 *logflagsp |= tmp_logflags;
2848 /* clear out the allocated field, done with it now in any case. */
2850 cur->bc_private.b.allocated = 0;
2852 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2859 * Functions used in the extent read, allocate and remove paths
2863 * Adjust the size of the new extent based on di_extsize and rt extsize.
2866 xfs_bmap_extsize_align(
2868 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2869 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2870 xfs_extlen_t extsz, /* align to this extent size */
2871 int rt, /* is this a realtime inode? */
2872 int eof, /* is extent at end-of-file? */
2873 int delay, /* creating delalloc extent? */
2874 int convert, /* overwriting unwritten extent? */
2875 xfs_fileoff_t *offp, /* in/out: aligned offset */
2876 xfs_extlen_t *lenp) /* in/out: aligned length */
2878 xfs_fileoff_t orig_off; /* original offset */
2879 xfs_extlen_t orig_alen; /* original length */
2880 xfs_fileoff_t orig_end; /* original off+len */
2881 xfs_fileoff_t nexto; /* next file offset */
2882 xfs_fileoff_t prevo; /* previous file offset */
2883 xfs_fileoff_t align_off; /* temp for offset */
2884 xfs_extlen_t align_alen; /* temp for length */
2885 xfs_extlen_t temp; /* temp for calculations */
2890 orig_off = align_off = *offp;
2891 orig_alen = align_alen = *lenp;
2892 orig_end = orig_off + orig_alen;
2895 * If this request overlaps an existing extent, then don't
2896 * attempt to perform any additional alignment.
2898 if (!delay && !eof &&
2899 (orig_off >= gotp->br_startoff) &&
2900 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2905 * If the file offset is unaligned vs. the extent size
2906 * we need to align it. This will be possible unless
2907 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	div_u64_rem(orig_off, extsz, &temp);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}

	/* Same adjustment for the end of the requested area. */
	temp = (align_alen % extsz);
	if (temp)
		align_alen += extsz - temp;
2923 * For large extent hint sizes, the aligned extent might be larger than
2924 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2925 * the length back under MAXEXTLEN. The outer allocation loops handle
2926 * short allocation just fine, so it is safe to do this. We only want to
2927 * do it when we are forced to, though, because it means more allocation
2928 * operations are required.
2930 while (align_alen > MAXEXTLEN)
2931 align_alen -= extsz;
2932 ASSERT(align_alen <= MAXEXTLEN);
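	/*
	 * Worked example of the rounding above (illustrative numbers):
	 * with extsz = 16, a request for offset 21, length 10 is first
	 * pulled back to offset 16 (temp = 5, length grows to 15), then
	 * the length is rounded up to 16, yielding an aligned region
	 * covering blocks 16-31 that contains the original request.
	 */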
2935 * If the previous block overlaps with this proposed allocation
2936 * then move the start forward without adjusting the length.
2938 if (prevp->br_startoff != NULLFILEOFF) {
2939 if (prevp->br_startblock == HOLESTARTBLOCK)
2940 prevo = prevp->br_startoff;
2942 prevo = prevp->br_startoff + prevp->br_blockcount;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
2948 * If the next block overlaps with this proposed allocation
2949 * then move the start back without adjusting the length,
2950 * but not before offset 0.
2951 * This may of course make the start overlap previous block,
2952 * and if we hit the offset 0 limit then the next block
2953 * can still overlap too.
2955 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2956 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2957 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2958 nexto = gotp->br_startoff + gotp->br_blockcount;
2960 nexto = gotp->br_startoff;
2962 nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
2965 align_off + align_alen > nexto)
2966 align_off = nexto > align_alen ? nexto - align_alen : 0;
2968 * If we're now overlapping the next or previous extent that
2969 * means we can't fit an extsz piece in this hole. Just move
2970 * the start forward to the first valid spot and set
2971 * the length so we hit the end.
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
2975 if (align_off + align_alen != orig_end &&
2976 align_off + align_alen > nexto &&
2977 nexto != NULLFILEOFF) {
2978 ASSERT(nexto > prevo);
2979 align_alen = nexto - align_off;
2983 * If realtime, and the result isn't a multiple of the realtime
2984 * extent size we need to remove blocks until it is.
2986 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2988 * We're not covering the original request, or
2989 * we won't be able to once we fix the length.
2991 if (orig_off < align_off ||
2992 orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return -EINVAL;
2996 * Try to fix it by moving the start up.
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
3003 * Try to fix it by moving the end in.
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
3016 * Result doesn't cover the request, fail it.
	if (orig_off < align_off || orig_end > align_off + align_alen)
		return -EINVAL;
3021 ASSERT(orig_off >= align_off);
3022 /* see MAXEXTLEN handling above */
3023 ASSERT(orig_end <= align_off + align_alen ||
3024 align_alen + extsz > MAXEXTLEN);
3028 if (!eof && gotp->br_startoff != NULLFILEOFF)
3029 ASSERT(align_off + align_alen <= gotp->br_startoff);
3030 if (prevp->br_startoff != NULLFILEOFF)
3031 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3039 #define XFS_ALLOC_GAP_UNITS 4
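/*
 * Heuristic used by xfs_bmap_adjacent() below: a gap between the chosen
 * neighbour and the new allocation is only mirrored on disk (keeping file
 * offset and disk block in step) when it is no more than XFS_ALLOC_GAP_UNITS
 * times the allocation length; a larger gap just targets the end of the
 * neighbouring extent instead.
 */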
STATIC void
xfs_bmap_adjacent(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
3045 xfs_fsblock_t adjust; /* adjustment to block numbers */
3046 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3047 xfs_mount_t *mp; /* mount point structure */
3048 int nullfb; /* true if ap->firstblock isn't set */
3049 int rt; /* true if inode is realtime */
#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
3054 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3055 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3056 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
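/*
 * ISVALID(x, y) checks that candidate block x is usable relative to the
 * known-good block y: on the realtime device it only needs to lie within
 * the device, while on the data device it must sit in the same AG as y
 * (an extent never spans AGs) and within that AG's block count.
 */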
3058 mp = ap->ip->i_mount;
3059 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3060 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3061 xfs_alloc_is_userdata(ap->datatype);
3062 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3063 ap->tp->t_firstblock);
3065 * If allocating at eof, and there's a previous real block,
3066 * try to use its last block as our starting point.
3068 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3069 !isnullstartblock(ap->prev.br_startblock) &&
3070 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3071 ap->prev.br_startblock)) {
3072 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3074 * Adjust for the gap between prevp and us.
3076 adjust = ap->offset -
3077 (ap->prev.br_startoff + ap->prev.br_blockcount);
3079 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3080 ap->blkno += adjust;
3083 * If not at eof, then compare the two neighbor blocks.
3084 * Figure out whether either one gives us a good starting point,
3085 * and pick the better one.
3087 else if (!ap->eof) {
3088 xfs_fsblock_t gotbno; /* right side block number */
3089 xfs_fsblock_t gotdiff=0; /* right side difference */
3090 xfs_fsblock_t prevbno; /* left side block number */
3091 xfs_fsblock_t prevdiff=0; /* left side difference */
3094 * If there's a previous (left) block, select a requested
3095 * start block based on it.
3097 if (ap->prev.br_startoff != NULLFILEOFF &&
3098 !isnullstartblock(ap->prev.br_startblock) &&
3099 (prevbno = ap->prev.br_startblock +
3100 ap->prev.br_blockcount) &&
3101 ISVALID(prevbno, ap->prev.br_startblock)) {
3103 * Calculate gap to end of previous block.
3105 adjust = prevdiff = ap->offset -
3106 (ap->prev.br_startoff +
3107 ap->prev.br_blockcount);
3109 * Figure the startblock based on the previous block's
3110 * end and the gap size.
3112 * If the gap is large relative to the piece we're
3113 * allocating, or using it gives us an invalid block
3114 * number, then just use the end of the previous block.
3116 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3117 ISVALID(prevbno + prevdiff,
			    ap->prev.br_startblock))
				prevbno += adjust;
			else
				prevdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
3126 if (!rt && !nullfb &&
3127 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3128 prevbno = NULLFSBLOCK;
3131 * No previous block or can't follow it, just default.
3134 prevbno = NULLFSBLOCK;
3136 * If there's a following (right) block, select a requested
3137 * start block based on it.
3139 if (!isnullstartblock(ap->got.br_startblock)) {
3141 * Calculate gap to start of next block.
3143 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3145 * Figure the startblock based on the next block's
3146 * start and the gap size.
3148 gotbno = ap->got.br_startblock;
3151 * If the gap is large relative to the piece we're
3152 * allocating, or using it gives us an invalid block
3153 * number, then just use the start of the next block
3154 * offset by our length.
3156 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
			    ISVALID(gotbno - gotdiff, gotbno))
				gotbno -= adjust;
3159 else if (ISVALID(gotbno - ap->length, gotbno)) {
3160 gotbno -= ap->length;
3161 gotdiff += adjust - ap->length;
			} else
				gotdiff += adjust;
			/*
			 * If the firstblock forbids it, can't use it,
			 * must use default.
			 */
3168 if (!rt && !nullfb &&
3169 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3170 gotbno = NULLFSBLOCK;
3173 * No next block, just default.
3176 gotbno = NULLFSBLOCK;
3178 * If both valid, pick the better one, else the only good
3179 * one, else ap->blkno is already set (to 0 or the inode block).
3181 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3182 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3183 else if (prevbno != NULLFSBLOCK)
3184 ap->blkno = prevbno;
3185 else if (gotbno != NULLFSBLOCK)
3192 xfs_bmap_longest_free_extent(
3193 struct xfs_trans *tp,
3198 struct xfs_mount *mp = tp->t_mountp;
3199 struct xfs_perag *pag;
3200 xfs_extlen_t longest;
3203 pag = xfs_perag_get(mp, ag);
3204 if (!pag->pagf_init) {
3205 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3209 if (!pag->pagf_init) {
3215 longest = xfs_alloc_longest_free_extent(pag,
3216 xfs_alloc_min_freelist(mp, pag),
3217 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;
3227 xfs_bmap_select_minlen(
3228 struct xfs_bmalloca *ap,
3229 struct xfs_alloc_arg *args,
3233 if (notinit || *blen < ap->minlen) {
3235 * Since we did a BUF_TRYLOCK above, it is possible that
3236 * there is space for this request.
3238 args->minlen = ap->minlen;
3239 } else if (*blen < args->maxlen) {
3241 * If the best seen length is less than the request length,
3242 * use the best as the minimum.
3244 args->minlen = *blen;
3247 * Otherwise we've seen an extent as big as maxlen, use that
3250 args->minlen = args->maxlen;
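	/*
	 * Example of the selection above (illustrative numbers): with
	 * maxlen = 64 and ap->minlen = 8, a best-seen free extent of 32
	 * blocks makes minlen 32 -- take the largest run we know exists
	 * rather than failing -- while a best of 64 or more leaves minlen
	 * at the full request of 64.
	 */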
3255 xfs_bmap_btalloc_nullfb(
3256 struct xfs_bmalloca *ap,
3257 struct xfs_alloc_arg *args,
3260 struct xfs_mount *mp = ap->ip->i_mount;
3261 xfs_agnumber_t ag, startag;
3265 args->type = XFS_ALLOCTYPE_START_BNO;
3266 args->total = ap->total;
3268 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3269 if (startag == NULLAGNUMBER)
3272 while (*blen < args->maxlen) {
3273 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3278 if (++ag == mp->m_sb.sb_agcount)
3284 xfs_bmap_select_minlen(ap, args, blen, notinit);
3289 xfs_bmap_btalloc_filestreams(
3290 struct xfs_bmalloca *ap,
3291 struct xfs_alloc_arg *args,
3294 struct xfs_mount *mp = ap->ip->i_mount;
3299 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3300 args->total = ap->total;
3302 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3303 if (ag == NULLAGNUMBER)
	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3310 if (*blen < args->maxlen) {
3311 error = xfs_filestream_new_ag(ap, &ag);
3315 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3322 xfs_bmap_select_minlen(ap, args, blen, notinit);
3325 * Set the failure fallback case to look in the selected AG as stream
3328 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3332 /* Update all inode and quota accounting for the allocation we just did. */
3334 xfs_bmap_btalloc_accounting(
3335 struct xfs_bmalloca *ap,
3336 struct xfs_alloc_arg *args)
3338 if (ap->flags & XFS_BMAPI_COWFORK) {
3340 * COW fork blocks are in-core only and thus are treated as
3341 * in-core quota reservation (like delalloc blocks) even when
3342 * converted to real blocks. The quota reservation is not
3343 * accounted to disk until blocks are remapped to the data
3344 * fork. So if these blocks were previously delalloc, we
3345 * already have quota reservation and there's nothing to do
3349 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3354 * Otherwise, we've allocated blocks in a hole. The transaction
3355 * has acquired in-core quota reservation for this extent.
3356 * Rather than account these as real blocks, however, we reduce
3357 * the transaction quota reservation based on the allocation.
3358 * This essentially transfers the transaction quota reservation
3359 * to that of a delalloc extent.
3361 ap->ip->i_delayed_blks += args->len;
3362 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3367 /* data/attr fork only */
3368 ap->ip->i_d.di_nblocks += args->len;
3369 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3371 ap->ip->i_delayed_blks -= args->len;
3372 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
	xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
		ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
		args->len);
STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
3383 xfs_mount_t *mp; /* mount point structure */
3384 xfs_alloctype_t atype = 0; /* type for allocation routines */
3385 xfs_extlen_t align = 0; /* minimum allocation alignment */
3386 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3388 xfs_alloc_arg_t args;
3389 xfs_fileoff_t orig_offset;
3390 xfs_extlen_t orig_length;
3392 xfs_extlen_t nextminlen = 0;
3393 int nullfb; /* true if ap->firstblock isn't set */
3400 orig_offset = ap->offset;
3401 orig_length = ap->length;
3403 mp = ap->ip->i_mount;
3405 /* stripe alignment for allocation is determined by mount parameters */
3407 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3408 stripe_align = mp->m_swidth;
3409 else if (mp->m_dalign)
3410 stripe_align = mp->m_dalign;
3412 if (ap->flags & XFS_BMAPI_COWFORK)
3413 align = xfs_get_cowextsz_hint(ap->ip);
3414 else if (xfs_alloc_is_userdata(ap->datatype))
3415 align = xfs_get_extsz_hint(ap->ip);
3417 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3418 align, 0, ap->eof, 0, ap->conv,
3419 &ap->offset, &ap->length);
3425 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3426 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3427 ap->tp->t_firstblock);
3429 if (xfs_alloc_is_userdata(ap->datatype) &&
3430 xfs_inode_is_filestream(ap->ip)) {
3431 ag = xfs_filestream_lookup_ag(ap->ip);
3432 ag = (ag != NULLAGNUMBER) ? ag : 0;
3433 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3435 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3438 ap->blkno = ap->tp->t_firstblock;
3440 xfs_bmap_adjacent(ap);
3443 * If allowed, use ap->blkno; otherwise must use firstblock since
3444 * it's in the right allocation group.
3446 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3449 ap->blkno = ap->tp->t_firstblock;
3451 * Normal allocation, done through xfs_alloc_vextent.
3453 tryagain = isaligned = 0;
3454 memset(&args, 0, sizeof(args));
3457 args.fsbno = ap->blkno;
3458 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3460 /* Trim the allocation back to the maximum an AG can fit. */
3461 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3465 * Search for an allocation group with a single extent large
3466 * enough for the request. If one isn't found, then adjust
3467 * the minimum allocation size to the largest space found.
3469 if (xfs_alloc_is_userdata(ap->datatype) &&
3470 xfs_inode_is_filestream(ap->ip))
3471 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3473 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3476 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3477 if (xfs_inode_is_filestream(ap->ip))
3478 args.type = XFS_ALLOCTYPE_FIRST_AG;
3480 args.type = XFS_ALLOCTYPE_START_BNO;
3481 args.total = args.minlen = ap->minlen;
3483 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3484 args.total = ap->total;
3485 args.minlen = ap->minlen;
3487 /* apply extent size hints if obtained earlier */
3490 div_u64_rem(ap->offset, args.prod, &args.mod);
3492 args.mod = args.prod - args.mod;
3493 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3497 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3498 div_u64_rem(ap->offset, args.prod, &args.mod);
3500 args.mod = args.prod - args.mod;
3503 * If we are not low on available data blocks, and the
3504 * underlying logical volume manager is a stripe, and
3505 * the file offset is zero then try to allocate data
3506 * blocks on stripe unit boundary.
3507 * NOTE: ap->aeof is only set if the allocation length
3508 * is >= the stripe unit and the allocation offset is
3509 * at the end of file.
	if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
3533 * Compute the minlen+alignment for the
3534 * next case. Set slop so that the value
3535 * of minlen+alignment+slop doesn't go up
3536 * between the calls.
3538 if (blen > stripe_align && blen <= args.maxlen)
3539 nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
3553 args.minleft = ap->minleft;
3554 args.wasdel = ap->wasdel;
3555 args.resv = XFS_AG_RESV_NONE;
3556 args.datatype = ap->datatype;
3557 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3560 error = xfs_alloc_vextent(&args);
3564 if (tryagain && args.fsbno == NULLFSBLOCK) {
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
3571 args.alignment = stripe_align;
3572 args.minlen = nextminlen;
3573 args.minalignslop = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
3578 if (isaligned && args.fsbno == NULLFSBLOCK) {
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
3589 if (args.fsbno == NULLFSBLOCK && nullfb &&
3590 args.minlen > ap->minlen) {
3591 args.minlen = ap->minlen;
3592 args.type = XFS_ALLOCTYPE_START_BNO;
3593 args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
3597 if (args.fsbno == NULLFSBLOCK && nullfb) {
3599 args.type = XFS_ALLOCTYPE_FIRST_AG;
3600 args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
3603 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
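	/*
	 * The chain above forms a fallback ladder: an exact-bno attempt
	 * (stripe-aligned at EOF) degrades to an aligned near-bno attempt,
	 * then to an unaligned one; if nothing is found and no firstblock
	 * constrains us, retry with the bare minimum length, and finally
	 * scan from the first AG with the total reservation relaxed,
	 * flipping the transaction into low-space mode.
	 */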
3605 if (args.fsbno != NULLFSBLOCK) {
3607 * check the allocation happened at the same or higher AG than
3608 * the first block that was allocated.
3610 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
3611 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
3612 XFS_FSB_TO_AGNO(mp, args.fsbno));
3614 ap->blkno = args.fsbno;
3615 if (ap->tp->t_firstblock == NULLFSBLOCK)
3616 ap->tp->t_firstblock = args.fsbno;
3617 ASSERT(nullfb || fb_agno <= args.agno);
3618 ap->length = args.len;
3620 * If the extent size hint is active, we tried to round the
3621 * caller's allocation request offset down to extsz and the
3622 * length up to another extsz boundary. If we found a free
3623 * extent we mapped it in starting at this new offset. If the
3624 * newly mapped space isn't long enough to cover any of the
3625 * range of offsets that was originally requested, move the
3626 * mapping up so that we can fill as much of the caller's
3627 * original request as possible. Free space is apparently
		 * very fragmented so we're unlikely to be able to satisfy the
		 * hints anyway.
3631 if (ap->length <= orig_length)
3632 ap->offset = orig_offset;
3633 else if (ap->offset + ap->length < orig_offset + orig_length)
3634 ap->offset = orig_offset + orig_length - ap->length;
3635 xfs_bmap_btalloc_accounting(ap, &args);
3637 ap->blkno = NULLFSBLOCK;
3644 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3645 * It figures out where to ask the underlying allocator to put the new extent.
STATIC int
xfs_bmap_alloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
3651 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3652 xfs_alloc_is_userdata(ap->datatype))
3653 return xfs_bmap_rtalloc(ap);
3654 return xfs_bmap_btalloc(ap);
3657 /* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
3664 xfs_fileoff_t distance;
3665 xfs_fileoff_t end = bno + len;
3667 if (irec->br_startoff + irec->br_blockcount <= bno ||
3668 irec->br_startoff >= end) {
		irec->br_blockcount = 0;
		return;
	}
3673 if (irec->br_startoff < bno) {
3674 distance = bno - irec->br_startoff;
3675 if (isnullstartblock(irec->br_startblock))
3676 irec->br_startblock = DELAYSTARTBLOCK;
3677 if (irec->br_startblock != DELAYSTARTBLOCK &&
3678 irec->br_startblock != HOLESTARTBLOCK)
3679 irec->br_startblock += distance;
3680 irec->br_startoff += distance;
3681 irec->br_blockcount -= distance;
3684 if (end < irec->br_startoff + irec->br_blockcount) {
3685 distance = irec->br_startoff + irec->br_blockcount - end;
3686 irec->br_blockcount -= distance;
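/*
 * Example of the trimming above: an irec covering file blocks [10, 30)
 * trimmed to bno = 15, len = 10 first loses 5 blocks from the front
 * (startoff, startblock and blockcount all move in step), then 5 from
 * the back, leaving exactly [15, 25).
 */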
3691 * Trim the returned map to the required bounds
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	int			flags)
{
3704 if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}
3714 ASSERT((*bno >= obno) || (n == 0));
3716 mval->br_startoff = *bno;
3717 if (isnullstartblock(got->br_startblock))
3718 mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
3721 (*bno - got->br_startoff);
3723 * Return the minimum of what we got and what we asked for for
3724 * the length. We can use the len variable here because it is
3725 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
3729 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3730 got->br_blockcount - (*bno - got->br_startoff));
3731 mval->br_state = got->br_state;
3732 ASSERT(mval->br_blockcount <= len);
3737 * Update and validate the extent map to return
3740 xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
3749 xfs_bmbt_irec_t *mval = *map;
3751 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3752 ((mval->br_startoff + mval->br_blockcount) <= end));
3753 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3754 (mval->br_startoff < obno));
3756 *bno = mval->br_startoff + mval->br_blockcount;
3758 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3759 /* update previous map with new information */
3760 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3761 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3762 ASSERT(mval->br_state == mval[-1].br_state);
3763 mval[-1].br_blockcount = mval->br_blockcount;
3764 mval[-1].br_state = mval->br_state;
3765 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3766 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3767 mval[-1].br_startblock != HOLESTARTBLOCK &&
3768 mval->br_startblock == mval[-1].br_startblock +
3769 mval[-1].br_blockcount &&
3770 mval[-1].br_state == mval->br_state) {
3771 ASSERT(mval->br_startoff ==
3772 mval[-1].br_startoff + mval[-1].br_blockcount);
3773 mval[-1].br_blockcount += mval->br_blockcount;
3774 } else if (*n > 0 &&
3775 mval->br_startblock == DELAYSTARTBLOCK &&
3776 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3777 mval->br_startoff ==
3778 mval[-1].br_startoff + mval[-1].br_blockcount) {
3779 mval[-1].br_blockcount += mval->br_blockcount;
3780 mval[-1].br_state = mval->br_state;
3781 } else if (!((*n == 0) &&
3782 ((mval->br_startoff + mval->br_blockcount) <=
3791 * Map file blocks to filesystem blocks without allocation.
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
3802 struct xfs_mount *mp = ip->i_mount;
3803 struct xfs_ifork *ifp;
3804 struct xfs_bmbt_irec got;
3807 struct xfs_iext_cursor icur;
3811 int whichfork = xfs_bmapi_whichfork(flags);
3814 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3815 XFS_BMAPI_COWFORK)));
3816 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3818 if (unlikely(XFS_TEST_ERROR(
3819 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3820 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3821 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3822 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3823 return -EFSCORRUPTED;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;
3829 XFS_STATS_INC(mp, xs_blk_mapr);
3831 ifp = XFS_IFORK_PTR(ip, whichfork);
3833 /* No CoW fork? Return a hole. */
3834 if (whichfork == XFS_COW_FORK) {
3835 mval->br_startoff = bno;
3836 mval->br_startblock = HOLESTARTBLOCK;
3837 mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}
3844 * A missing attr ifork implies that the inode says we're in
	 * extents or btree format but failed to pass the inode fork
	 * verifier while trying to load it. Treat that as a file
	 * corruption, too.
3850 xfs_alert(mp, "%s: inode %llu missing fork %d",
3851 __func__, ip->i_ino, whichfork);
3853 return -EFSCORRUPTED;
3856 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;
3867 while (bno < end && n < *nmap) {
3868 /* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
3871 if (got.br_startoff > bno) {
3872 /* Reading in a hole. */
3873 mval->br_startoff = bno;
3874 mval->br_startblock = HOLESTARTBLOCK;
3875 mval->br_blockcount =
3876 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3877 mval->br_state = XFS_EXT_NORM;
3878 bno += mval->br_blockcount;
3879 len -= mval->br_blockcount;
3885 /* set up the extent map to return. */
3886 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3887 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3889 /* If we're done, stop now. */
3890 if (bno >= end || n >= *nmap)
3893 /* Else go on to the next record. */
3894 if (!xfs_iext_next_extent(ifp, &icur, &got))
3902 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3903 * global pool and the extent inserted into the inode in-core extent tree.
3905 * On entry, got refers to the first extent beyond the offset of the extent to
3906 * allocate or eof is specified if no such extent exists. On return, got refers
3907 * to the extent record that was inserted to the inode fork.
3909 * Note that the allocated extent may have been merged with contiguous extents
3910 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use @icur to
3912 * look up the updated record in the inode fork.
3915 xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
3925 struct xfs_mount *mp = ip->i_mount;
3926 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3928 xfs_extlen_t indlen;
3930 xfs_fileoff_t aoff = off;
3933 * Cap the alloc length. Keep track of prealloc so we know whether to
3934 * tag the inode before we return.
3936 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3939 if (prealloc && alen >= len)
3940 prealloc = alen - len;
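	/*
	 * Example of the capping above (illustrative numbers): a write of
	 * len = 8 with prealloc = 120 asks for alen = 128; if the next
	 * extent starts 64 blocks away, alen is clamped to 64 and prealloc
	 * becomes 56, which still marks the inode as carrying speculative
	 * preallocation.
	 */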
3942 /* Figure out the extent size, adjust alen */
3943 if (whichfork == XFS_COW_FORK) {
3944 struct xfs_bmbt_irec prev;
3945 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3947 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3948 prev.br_startoff = NULLFILEOFF;
3950 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3951 1, 0, &aoff, &alen);
3956 * Make a transaction-less quota reservation for delayed allocation
3957 * blocks. This number gets adjusted later. We return if we haven't
3958 * allocated blocks already inside this loop.
3960 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;
3966 * Split changing sb for alen and indlen since they could be coming
3967 * from different places.
3969 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3972 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3974 goto out_unreserve_quota;
3976 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3978 goto out_unreserve_blocks;
3981 ip->i_delayed_blks += alen;
3982 xfs_mod_delalloc(ip->i_mount, alen + indlen);
3984 got->br_startoff = aoff;
3985 got->br_startblock = nullstartblock(indlen);
3986 got->br_blockcount = alen;
3987 got->br_state = XFS_EXT_NORM;
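	/*
	 * A delalloc extent has no disk blocks yet, so the startblock
	 * field is reused to carry the worst-case indirect block count:
	 * nullstartblock(indlen) encodes indlen where startblockval() can
	 * later recover it when the reservation is converted or freed.
	 */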
3989 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3992 * Tag the inode if blocks were preallocated. Note that COW fork
3993 * preallocation can occur at the start or end of the extent, even when
3994 * prealloc == 0, so we must also check the aligned offset and length.
3996 if (whichfork == XFS_DATA_FORK && prealloc)
3997 xfs_inode_set_eofblocks_tag(ip);
3998 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
3999 xfs_inode_set_cowblocks_tag(ip);
4003 out_unreserve_blocks:
4004 xfs_mod_fdblocks(mp, alen, false);
4005 out_unreserve_quota:
4006 if (XFS_IS_QUOTA_ON(mp))
4007 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
4008 XFS_QMOPT_RES_REGBLKS);
4014 struct xfs_bmalloca *bma)
4016 struct xfs_mount *mp = bma->ip->i_mount;
4017 int whichfork = xfs_bmapi_whichfork(bma->flags);
4018 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4019 int tmp_logflags = 0;
4022 ASSERT(bma->length > 0);
4025 * For the wasdelay case, we could also just allocate the stuff asked
4026 * for in this bmap call but that wouldn't be as good.
4029 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4030 bma->offset = bma->got.br_startoff;
4031 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4033 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4035 bma->length = XFS_FILBLKS_MIN(bma->length,
4036 bma->got.br_startoff - bma->offset);
4040 * Set the data type being allocated. For the data fork, the first data
4041 * in the file is treated differently to all other allocations. For the
	 * attribute fork, we only need to ensure the allocated range is not on
	 * the busy list.
4045 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4046 bma->datatype = XFS_ALLOC_NOBUSY;
4047 if (whichfork == XFS_DATA_FORK) {
4048 if (bma->offset == 0)
4049 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4051 bma->datatype |= XFS_ALLOC_USERDATA;
4053 if (bma->flags & XFS_BMAPI_ZERO)
4054 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4057 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4060 * Only want to do the alignment at the eof if it is userdata and
4061 * allocation length is larger than a stripe unit.
4063 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4064 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4065 error = xfs_bmap_isaeof(bma, whichfork);
4070 error = xfs_bmap_alloc(bma);
4074 if (bma->blkno == NULLFSBLOCK)
4076 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
4077 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4079 * Bump the number of extents we've allocated
4085 bma->cur->bc_private.b.flags =
4086 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4088 bma->got.br_startoff = bma->offset;
4089 bma->got.br_startblock = bma->blkno;
4090 bma->got.br_blockcount = bma->length;
4091 bma->got.br_state = XFS_EXT_NORM;
4094 * In the data fork, a wasdelay extent has been initialized, so
4095 * shouldn't be flagged as unwritten.
4097 * For the cow fork, however, we convert delalloc reservations
4098 * (extents allocated for speculative preallocation) to
4099 * allocated unwritten extents, and only convert the unwritten
4100 * extents to real extents when we're about to write the data.
4102 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4103 (bma->flags & XFS_BMAPI_PREALLOC))
4104 bma->got.br_state = XFS_EXT_UNWRITTEN;
4107 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4109 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4110 whichfork, &bma->icur, &bma->cur, &bma->got,
4111 &bma->logflags, bma->flags);
4113 bma->logflags |= tmp_logflags;
4118 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4119 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4120 * the neighbouring ones.
4122 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4124 ASSERT(bma->got.br_startoff <= bma->offset);
4125 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4126 bma->offset + bma->length);
4127 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4128 bma->got.br_state == XFS_EXT_UNWRITTEN);
4133 xfs_bmapi_convert_unwritten(
4134 struct xfs_bmalloca *bma,
4135 struct xfs_bmbt_irec *mval,
4139 int whichfork = xfs_bmapi_whichfork(flags);
4140 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4141 int tmp_logflags = 0;
4144 /* check if we need to do unwritten->real conversion */
4145 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4146 (flags & XFS_BMAPI_PREALLOC))
4149 /* check if we need to do real->unwritten conversion */
4150 if (mval->br_state == XFS_EXT_NORM &&
4151 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4152 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4156 * Modify (by adding) the state flag, if writing.
4158 ASSERT(mval->br_blockcount <= len);
4159 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4160 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4161 bma->ip, whichfork);
4163 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4164 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4167 * Before insertion into the bmbt, zero the range being converted
4170 if (flags & XFS_BMAPI_ZERO) {
4171 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4172 mval->br_blockcount);
4177 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4178 &bma->icur, &bma->cur, mval, &tmp_logflags);
4180 * Log the inode core unconditionally in the unwritten extent conversion
4181 * path because the conversion might not have done so (e.g., if the
4182 * extent count hasn't changed). We need to make sure the inode is dirty
4183 * in the transaction for the sake of fsync(), even if nothing has
4184 * changed, because fsync() will not force the log for this transaction
4185 * unless it sees the inode pinned.
4187 * Note: If we're only converting cow fork extents, there aren't
4188 * any on-disk updates to make, so we don't need to log anything.
4190 if (whichfork != XFS_COW_FORK)
4191 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4196 * Update our extent pointer, given that
4197 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4198 * of the neighbouring ones.
4200 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4203 * We may have combined previously unwritten space with written space,
4204 * so generate another request.
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
4211 static inline xfs_extlen_t
4213 struct xfs_trans *tp,
	struct xfs_inode	*ip,
	int			fork)
{
	if (tp && tp->t_firstblock != NULLFSBLOCK)
		return 0;
	if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
		return 1;
	return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
}
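/*
 * The value computed above is the worst-case number of blocks a subsequent
 * bmbt change may need: none once firstblock is set (an AG has already been
 * committed to), one block for an extents-format fork that might convert to
 * btree format, otherwise roughly one block per level of the existing tree
 * so a full split can always be satisfied.
 */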
4225 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4226 * a case where the data is changed, there's an error, and it's not logged so we
4227 * don't shutdown when we should. Don't bother logging extents/btree changes if
4228 * we converted to the other format.
4232 struct xfs_bmalloca *bma,
4236 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4237 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4238 bma->logflags &= ~xfs_ilog_fext(whichfork);
4239 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4240 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
4241 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4244 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4246 xfs_btree_del_cursor(bma->cur, error);
4250 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary. Detailed behaviour is controlled by the flags
 * parameter. Only allocates blocks from a single allocation group, to avoid
 * locking problems.
4257 struct xfs_trans *tp, /* transaction pointer */
4258 struct xfs_inode *ip, /* incore inode */
4259 xfs_fileoff_t bno, /* starting file offs. mapped */
4260 xfs_filblks_t len, /* length to map in file */
4261 int flags, /* XFS_BMAPI_... */
4262 xfs_extlen_t total, /* total blocks needed */
4263 struct xfs_bmbt_irec *mval, /* output: map values */
4264 int *nmap) /* i/o: mval size/count */
4266 struct xfs_bmalloca bma = {
4271 struct xfs_mount *mp = ip->i_mount;
4272 struct xfs_ifork *ifp;
4273 xfs_fileoff_t end; /* end of mapped file region */
4274 bool eof = false; /* after the end of extents */
4275 int error; /* error return */
4276 int n; /* current extent index */
4277 xfs_fileoff_t obno; /* old block number (offset) */
4278 int whichfork; /* data or attr fork */
4281 xfs_fileoff_t orig_bno; /* original block number value */
4282 int orig_flags; /* original flags arg value */
4283 xfs_filblks_t orig_len; /* original value of len arg */
4284 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4285 int orig_nmap; /* original value of *nmap */
4293 whichfork = xfs_bmapi_whichfork(flags);
4296 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4299 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4300 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4301 ASSERT(!(flags & XFS_BMAPI_REMAP));
	/* zeroing is currently only done for data extents, not metadata */
4304 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4305 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4307 * we can allocate unwritten extents or pre-zero allocated blocks,
4308 * but it makes no sense to do both at once. This would result in
4309 * zeroing the unwritten extent twice, but it still being an
4310 * unwritten extent....
4312 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4313 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4315 if (unlikely(XFS_TEST_ERROR(
4316 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4317 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4318 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4319 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4320 return -EFSCORRUPTED;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;
4326 ifp = XFS_IFORK_PTR(ip, whichfork);
4328 XFS_STATS_INC(mp, xs_blk_mapw);
4330 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
4338 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4339 bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
4345 while (bno < end && n < *nmap) {
4346 bool need_alloc = false, wasdelay = false;
4348 /* in hole or beyond EOF? */
4349 if (eof || bma.got.br_startoff > bno) {
4351 * CoW fork conversions should /never/ hit EOF or
			 * holes. There should always be something for us
			 * to work on.
4355 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			       (flags & XFS_BMAPI_COWFORK)));
			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			wasdelay = true;
		}
4364 * First, deal with the hole before the allocated space
4365 * that we found, if any.
4367 if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;
4375 * There's a 32/64 bit type mismatch between the
4376 * allocation length request (which can be 64 bits in
4377 * length) and the bma length request, which is
4378 * xfs_extlen_t and therefore 32 bits. Hence we have to
4379 * check for 32-bit overflows and handle them here.
4381 if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;
4387 ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
4391 if (bma.blkno == NULLFSBLOCK)
4395 * If this is a CoW allocation, record the data in
4396 * the refcount btree for orphan recovery.
4398 if (whichfork == XFS_COW_FORK)
4399 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4403 /* Deal with the allocated space we found. */
4404 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4407 /* Execute unwritten extent conversion if necessary */
4408 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4409 if (error == -EAGAIN)
4414 /* update the extent map to return */
4415 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4418 * If we're done, stop now. Stop when we've allocated
4419 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4420 * the transaction may get too big.
4422 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4425 /* Else go on to the next record. */
4427 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4432 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4437 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4438 XFS_IFORK_NEXTENTS(ip, whichfork) >
4439 XFS_IFORK_MAXEXT(ip, whichfork));
4440 xfs_bmapi_finish(&bma, whichfork, 0);
4441 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4445 xfs_bmapi_finish(&bma, whichfork, error);
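/*
 * Illustrative usage sketch, not part of the original source: a typical
 * caller maps or allocates a file range from within a transaction;
 * offset_fsb and count_fsb are hypothetical locals. Callers of this era
 * commonly treat nmap == 0 as ENOSPC.
 *
 *	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
 *	int			nmap = XFS_BMAP_MAX_NMAP;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, 0, map, &nmap);
 *	if (!error && nmap == 0)
 *		error = -ENOSPC;
 */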
4450 * Convert an existing delalloc extent to real blocks based on file offset. This
4451 * attempts to allocate the entire delalloc extent and may require multiple
4452 * invocations to allocate the target offset if a large enough physical extent
4453 * is not available.
4456 xfs_bmapi_convert_delalloc(
4457 struct xfs_inode *ip,
4459 xfs_fileoff_t offset_fsb,
4460 struct xfs_bmbt_irec *imap,
4463 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4464 struct xfs_mount *mp = ip->i_mount;
4465 struct xfs_bmalloca bma = { NULL };
4466 struct xfs_trans *tp;
4470 * Space for the extent and indirect blocks was reserved when the
4471 * delalloc extent was created so there's no need to do so here.
4473 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4474 XFS_TRANS_RESERVE, &tp);
4478 xfs_ilock(ip, XFS_ILOCK_EXCL);
4479 xfs_trans_ijoin(tp, ip, 0);
4481 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4482 bma.got.br_startoff > offset_fsb) {
4484 * No extent found in the range we are trying to convert. This
4485 * should only happen for the COW fork, where another thread
4486 * might have moved the extent to the data fork in the meantime.
4488 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4490 goto out_trans_cancel;
4494 * If we find a real extent here we raced with another thread converting
4495 * the extent. Just return the real extent at this offset.
4497 if (!isnullstartblock(bma.got.br_startblock)) {
4499 *seq = READ_ONCE(ifp->if_seq);
4500 goto out_trans_cancel;
4506 bma.offset = bma.got.br_startoff;
4507 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4508 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
4509 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4510 if (whichfork == XFS_COW_FORK)
4511 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
4513 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4514 bma.prev.br_startoff = NULLFILEOFF;
4516 error = xfs_bmapi_allocate(&bma);
4521 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4523 error = -EFSCORRUPTED;
4524 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4527 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4528 XFS_STATS_INC(mp, xs_xstrat_quick);
4530 ASSERT(!isnullstartblock(bma.got.br_startblock));
4532 *seq = READ_ONCE(ifp->if_seq);
4534 if (whichfork == XFS_COW_FORK)
4535 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4537 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4542 xfs_bmapi_finish(&bma, whichfork, 0);
4543 error = xfs_trans_commit(tp);
4544 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4548 xfs_bmapi_finish(&bma, whichfork, error);
4550 xfs_trans_cancel(tp);
4551 xfs_iunlock(ip, XFS_ILOCK_EXCL);
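/*
 * Illustrative usage sketch, not part of the original source: since the
 * allocation starts at the head of the delalloc extent and may come up
 * short, writeback-style callers retry until the returned mapping
 * covers the target offset (imap and seq are hypothetical locals):
 *
 *	do {
 *		error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK,
 *				offset_fsb, &imap, &seq);
 *		if (error)
 *			break;
 *	} while (imap.br_startoff + imap.br_blockcount <= offset_fsb);
 */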
4557 struct xfs_trans *tp,
4558 struct xfs_inode *ip,
4561 xfs_fsblock_t startblock,
4564 struct xfs_mount *mp = ip->i_mount;
4565 struct xfs_ifork *ifp;
4566 struct xfs_btree_cur *cur = NULL;
4567 struct xfs_bmbt_irec got;
4568 struct xfs_iext_cursor icur;
4569 int whichfork = xfs_bmapi_whichfork(flags);
4570 int logflags = 0, error;
4572 ifp = XFS_IFORK_PTR(ip, whichfork);
4574 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4575 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4576 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4577 XFS_BMAPI_NORMAP)));
4578 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4579 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4581 if (unlikely(XFS_TEST_ERROR(
4582 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4583 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4584 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4585 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4586 return -EFSCORRUPTED;
4589 if (XFS_FORCED_SHUTDOWN(mp))
4592 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4593 error = xfs_iread_extents(tp, ip, whichfork);
4598 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4599 /* make sure we only reflink into a hole. */
4600 ASSERT(got.br_startoff > bno);
4601 ASSERT(got.br_startoff - bno >= len);
4604 ip->i_d.di_nblocks += len;
4605 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4607 if (ifp->if_flags & XFS_IFBROOT) {
4608 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4609 cur->bc_private.b.flags = 0;
4612 got.br_startoff = bno;
4613 got.br_startblock = startblock;
4614 got.br_blockcount = len;
4615 if (flags & XFS_BMAPI_PREALLOC)
4616 got.br_state = XFS_EXT_UNWRITTEN;
4618 got.br_state = XFS_EXT_NORM;
4620 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4621 &cur, &got, &logflags, flags);
4625 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4628 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4629 logflags &= ~XFS_ILOG_DEXT;
4630 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4631 logflags &= ~XFS_ILOG_DBROOT;
4634 xfs_trans_log_inode(tp, ip, logflags);
4636 xfs_btree_del_cursor(cur, error);
4641 * When a delalloc extent is split (e.g., due to a hole punch), the original
4642 * indlen reservation must be shared across the two new extents that are left
4643 * behind.
4645 * Given the original reservation and the worst case indlen for the two new
4646 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4647 * reservation fairly across the two new extents. If necessary, steal available
4648 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4649 * ores == 1). The number of stolen blocks is returned. The availability and
4650 * subsequent accounting of stolen blocks is the responsibility of the caller.
4652 static xfs_filblks_t
4653 xfs_bmap_split_indlen(
4654 xfs_filblks_t ores, /* original res. */
4655 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4656 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4657 xfs_filblks_t avail) /* stealable blocks */
4659 xfs_filblks_t len1 = *indlen1;
4660 xfs_filblks_t len2 = *indlen2;
4661 xfs_filblks_t nres = len1 + len2; /* new total res. */
4662 xfs_filblks_t stolen = 0;
4663 xfs_filblks_t resfactor;
4666 * Steal as many blocks as we can to try and satisfy the worst case
4667 * indlen for both new extents.
4669 if (ores < nres && avail)
4670 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4673 /* nothing else to do if we've satisfied the new reservation */
4678 * We can't meet the total required reservation for the two extents.
4679 * Calculate the percent of the overall shortage between both extents
4680 * and apply this percentage to each of the requested indlen values.
4681 * This distributes the shortage fairly and reduces the chances that one
4682 * of the two extents is left with nothing when extents are repeatedly
4683 * split.
4685 resfactor = (ores * 100);
4686 do_div(resfactor, nres);
4691 ASSERT(len1 + len2 <= ores);
4692 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4695 * Hand out the remainder to each extent. If one of the two reservations
4696 * is zero, we want to make sure that one gets a block first. The loop
4697 * below starts with len1, so hand len2 a block right off the bat if it
4698 * is zero.
4700 ores -= (len1 + len2);
4701 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4702 if (ores && !len2 && *indlen2) {
4707 if (len1 < *indlen1) {
4713 if (len2 < *indlen2) {
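/*
 * Worked example, illustrative only: ores = 5, *indlen1 = *indlen2 = 4,
 * avail = 1. nres = 8, so one block is stolen (stolen = 1) and ores
 * becomes 6. Then resfactor = 600 / 8 = 75, giving
 * len1 = len2 = 4 * 75 / 100 = 3 with no remainder left to hand out:
 * each extent keeps 3 of its worst-case 4 indlen blocks.
 */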
4726 xfs_bmap_del_extent_delay(
4727 struct xfs_inode *ip,
4729 struct xfs_iext_cursor *icur,
4730 struct xfs_bmbt_irec *got,
4731 struct xfs_bmbt_irec *del)
4733 struct xfs_mount *mp = ip->i_mount;
4734 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4735 struct xfs_bmbt_irec new;
4736 int64_t da_old, da_new, da_diff = 0;
4737 xfs_fileoff_t del_endoff, got_endoff;
4738 xfs_filblks_t got_indlen, new_indlen, stolen;
4739 int state = xfs_bmap_fork_to_state(whichfork);
4743 XFS_STATS_INC(mp, xs_del_exlist);
4745 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4746 del_endoff = del->br_startoff + del->br_blockcount;
4747 got_endoff = got->br_startoff + got->br_blockcount;
4748 da_old = startblockval(got->br_startblock);
4751 ASSERT(del->br_blockcount > 0);
4752 ASSERT(got->br_startoff <= del->br_startoff);
4753 ASSERT(got_endoff >= del_endoff);
4756 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4758 do_div(rtexts, mp->m_sb.sb_rextsize);
4759 xfs_mod_frextents(mp, rtexts);
4763 * Update the inode delalloc counter now and wait to update the
4764 * sb counters as we might have to borrow some blocks for the
4765 * indirect block accounting.
4767 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4768 -((long)del->br_blockcount), 0,
4769 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4772 ip->i_delayed_blks -= del->br_blockcount;
4774 if (got->br_startoff == del->br_startoff)
4775 state |= BMAP_LEFT_FILLING;
4776 if (got_endoff == del_endoff)
4777 state |= BMAP_RIGHT_FILLING;
4779 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4780 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4782 * Matches the whole extent. Delete the entry.
4784 xfs_iext_remove(ip, icur, state);
4785 xfs_iext_prev(ifp, icur);
4787 case BMAP_LEFT_FILLING:
4789 * Deleting the first part of the extent.
4791 got->br_startoff = del_endoff;
4792 got->br_blockcount -= del->br_blockcount;
4793 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4794 got->br_blockcount), da_old);
4795 got->br_startblock = nullstartblock((int)da_new);
4796 xfs_iext_update_extent(ip, state, icur, got);
4798 case BMAP_RIGHT_FILLING:
4800 * Deleting the last part of the extent.
4802 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4803 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4804 got->br_blockcount), da_old);
4805 got->br_startblock = nullstartblock((int)da_new);
4806 xfs_iext_update_extent(ip, state, icur, got);
4810 * Deleting the middle of the extent.
4812 * Distribute the original indlen reservation across the two new
4813 * extents. Steal blocks from the deleted extent if necessary.
4814 * Stealing blocks simply fudges the fdblocks accounting below.
4815 * Warn if either of the new indlen reservations is zero as this
4816 * can lead to delalloc problems.
4818 got->br_blockcount = del->br_startoff - got->br_startoff;
4819 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4821 new.br_blockcount = got_endoff - del_endoff;
4822 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4824 WARN_ON_ONCE(!got_indlen || !new_indlen);
4825 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4826 del->br_blockcount);
4828 got->br_startblock = nullstartblock((int)got_indlen);
4830 new.br_startoff = del_endoff;
4831 new.br_state = got->br_state;
4832 new.br_startblock = nullstartblock((int)new_indlen);
4834 xfs_iext_update_extent(ip, state, icur, got);
4835 xfs_iext_next(ifp, icur);
4836 xfs_iext_insert(ip, icur, &new, state);
4838 da_new = got_indlen + new_indlen - stolen;
4839 del->br_blockcount -= stolen;
4843 ASSERT(da_old >= da_new);
4844 da_diff = da_old - da_new;
4846 da_diff += del->br_blockcount;
4848 xfs_mod_fdblocks(mp, da_diff, false);
4849 xfs_mod_delalloc(mp, -da_diff);
4855 xfs_bmap_del_extent_cow(
4856 struct xfs_inode *ip,
4857 struct xfs_iext_cursor *icur,
4858 struct xfs_bmbt_irec *got,
4859 struct xfs_bmbt_irec *del)
4861 struct xfs_mount *mp = ip->i_mount;
4862 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4863 struct xfs_bmbt_irec new;
4864 xfs_fileoff_t del_endoff, got_endoff;
4865 int state = BMAP_COWFORK;
4867 XFS_STATS_INC(mp, xs_del_exlist);
4869 del_endoff = del->br_startoff + del->br_blockcount;
4870 got_endoff = got->br_startoff + got->br_blockcount;
4872 ASSERT(del->br_blockcount > 0);
4873 ASSERT(got->br_startoff <= del->br_startoff);
4874 ASSERT(got_endoff >= del_endoff);
4875 ASSERT(!isnullstartblock(got->br_startblock));
4877 if (got->br_startoff == del->br_startoff)
4878 state |= BMAP_LEFT_FILLING;
4879 if (got_endoff == del_endoff)
4880 state |= BMAP_RIGHT_FILLING;
4882 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4883 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4885 * Matches the whole extent. Delete the entry.
4887 xfs_iext_remove(ip, icur, state);
4888 xfs_iext_prev(ifp, icur);
4890 case BMAP_LEFT_FILLING:
4892 * Deleting the first part of the extent.
4894 got->br_startoff = del_endoff;
4895 got->br_blockcount -= del->br_blockcount;
4896 got->br_startblock = del->br_startblock + del->br_blockcount;
4897 xfs_iext_update_extent(ip, state, icur, got);
4899 case BMAP_RIGHT_FILLING:
4901 * Deleting the last part of the extent.
4903 got->br_blockcount -= del->br_blockcount;
4904 xfs_iext_update_extent(ip, state, icur, got);
4908 * Deleting the middle of the extent.
4910 got->br_blockcount = del->br_startoff - got->br_startoff;
4912 new.br_startoff = del_endoff;
4913 new.br_blockcount = got_endoff - del_endoff;
4914 new.br_state = got->br_state;
4915 new.br_startblock = del->br_startblock + del->br_blockcount;
4917 xfs_iext_update_extent(ip, state, icur, got);
4918 xfs_iext_next(ifp, icur);
4919 xfs_iext_insert(ip, icur, &new, state);
4922 ip->i_delayed_blks -= del->br_blockcount;
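/*
 * Illustrative sketch, not part of the original source: the four
 * filling cases above, for a CoW extent got covering file blocks
 * [10, 20):
 *
 *	del [10, 20) -> both flags set, the extent is removed outright
 *	del [10, 14) -> BMAP_LEFT_FILLING, got is trimmed to [14, 20)
 *	del [16, 20) -> BMAP_RIGHT_FILLING, got is trimmed to [10, 16)
 *	del [13, 17) -> neither flag, got becomes [10, 13) and a new
 *			extent [17, 20) is inserted after it
 */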
4926 * Called by xfs_bmapi to update file extent records and the btree
4927 * after removing space.
4929 STATIC int /* error */
4930 xfs_bmap_del_extent_real(
4931 xfs_inode_t *ip, /* incore inode pointer */
4932 xfs_trans_t *tp, /* current transaction pointer */
4933 struct xfs_iext_cursor *icur,
4934 xfs_btree_cur_t *cur, /* if null, not a btree */
4935 xfs_bmbt_irec_t *del, /* data to remove from extents */
4936 int *logflagsp, /* inode logging flags */
4937 int whichfork, /* data or attr fork */
4938 int bflags) /* bmapi flags */
4940 xfs_fsblock_t del_endblock=0; /* first block past del */
4941 xfs_fileoff_t del_endoff; /* first offset past del */
4942 int do_fx; /* free extent at end of routine */
4943 int error; /* error return value */
4944 int flags = 0;/* inode logging flags */
4945 struct xfs_bmbt_irec got; /* current extent entry */
4946 xfs_fileoff_t got_endoff; /* first offset past got */
4947 int i; /* temp state */
4948 struct xfs_ifork *ifp; /* inode fork pointer */
4949 xfs_mount_t *mp; /* mount structure */
4950 xfs_filblks_t nblks; /* quota/sb block count */
4951 xfs_bmbt_irec_t new; /* new record to be inserted */
4953 uint qfield; /* quota field to update */
4954 int state = xfs_bmap_fork_to_state(whichfork);
4955 struct xfs_bmbt_irec old;
4958 XFS_STATS_INC(mp, xs_del_exlist);
4960 ifp = XFS_IFORK_PTR(ip, whichfork);
4961 ASSERT(del->br_blockcount > 0);
4962 xfs_iext_get_extent(ifp, icur, &got);
4963 ASSERT(got.br_startoff <= del->br_startoff);
4964 del_endoff = del->br_startoff + del->br_blockcount;
4965 got_endoff = got.br_startoff + got.br_blockcount;
4966 ASSERT(got_endoff >= del_endoff);
4967 ASSERT(!isnullstartblock(got.br_startblock));
4972 * If it's the case where the directory code is running with no block
4973 * reservation, and the deleted block is in the middle of its extent,
4974 * and the resulting insert of an extent would cause transformation to
4975 * btree format, then reject it. The calling code will then swap blocks
4976 * around instead. We have to do this now, rather than waiting for the
4977 * conversion to btree format, since the transaction will be dirty then.
4979 if (tp->t_blk_res == 0 &&
4980 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4981 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4982 XFS_IFORK_MAXEXT(ip, whichfork) &&
4983 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4986 flags = XFS_ILOG_CORE;
4987 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4992 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
4995 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
4999 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5003 nblks = len * mp->m_sb.sb_rextsize;
5004 qfield = XFS_TRANS_DQ_RTBCOUNT;
5007 nblks = del->br_blockcount;
5008 qfield = XFS_TRANS_DQ_BCOUNT;
5011 del_endblock = del->br_startblock + del->br_blockcount;
5013 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5016 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5019 if (got.br_startoff == del->br_startoff)
5020 state |= BMAP_LEFT_FILLING;
5021 if (got_endoff == del_endoff)
5022 state |= BMAP_RIGHT_FILLING;
5024 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5025 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5027 * Matches the whole extent. Delete the entry.
5029 xfs_iext_remove(ip, icur, state);
5030 xfs_iext_prev(ifp, icur);
5031 XFS_IFORK_NEXT_SET(ip, whichfork,
5032 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5033 flags |= XFS_ILOG_CORE;
5035 flags |= xfs_ilog_fext(whichfork);
5038 if ((error = xfs_btree_delete(cur, &i)))
5040 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5042 case BMAP_LEFT_FILLING:
5044 * Deleting the first part of the extent.
5046 got.br_startoff = del_endoff;
5047 got.br_startblock = del_endblock;
5048 got.br_blockcount -= del->br_blockcount;
5049 xfs_iext_update_extent(ip, state, icur, &got);
5051 flags |= xfs_ilog_fext(whichfork);
5054 error = xfs_bmbt_update(cur, &got);
5058 case BMAP_RIGHT_FILLING:
5060 * Deleting the last part of the extent.
5062 got.br_blockcount -= del->br_blockcount;
5063 xfs_iext_update_extent(ip, state, icur, &got);
5065 flags |= xfs_ilog_fext(whichfork);
5068 error = xfs_bmbt_update(cur, &got);
5074 * Deleting the middle of the extent.
5078 got.br_blockcount = del->br_startoff - got.br_startoff;
5079 xfs_iext_update_extent(ip, state, icur, &got);
5081 new.br_startoff = del_endoff;
5082 new.br_blockcount = got_endoff - del_endoff;
5083 new.br_state = got.br_state;
5084 new.br_startblock = del_endblock;
5086 flags |= XFS_ILOG_CORE;
5088 error = xfs_bmbt_update(cur, &got);
5091 error = xfs_btree_increment(cur, 0, &i);
5094 cur->bc_rec.b = new;
5095 error = xfs_btree_insert(cur, &i);
5096 if (error && error != -ENOSPC)
5099 * If we get ENOSPC back from the btree insert, it tried a
5100 * split, and we have a zero block reservation. Fix up
5101 * our state and return the error.
5103 if (error == -ENOSPC) {
5105 * Reset the cursor, don't trust it after any
5106 * insert operation.
5108 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5111 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5113 * Update the btree record back
5114 * to the original value.
5116 error = xfs_bmbt_update(cur, &old);
5120 * Reset the extent record back
5121 * to the original value.
5123 xfs_iext_update_extent(ip, state, icur, &old);
5128 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5130 flags |= xfs_ilog_fext(whichfork);
5131 XFS_IFORK_NEXT_SET(ip, whichfork,
5132 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5133 xfs_iext_next(ifp, icur);
5134 xfs_iext_insert(ip, icur, &new, state);
5138 /* remove reverse mapping */
5139 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5142 * If we need to, add to list of extents to delete.
5144 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5145 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5146 xfs_refcount_decrease_extent(tp, del);
5148 __xfs_bmap_add_free(tp, del->br_startblock,
5149 del->br_blockcount, NULL,
5150 (bflags & XFS_BMAPI_NODISCARD) ||
5151 del->br_state == XFS_EXT_UNWRITTEN);
5156 * Adjust inode # blocks in the file.
5159 ip->i_d.di_nblocks -= nblks;
5161 * Adjust quota data.
5163 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5164 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5172 * Unmap (remove) blocks from a file.
5173 * If nexts is nonzero then the number of extents to remove is limited to
5174 * that value. If not all extents in the block range can be removed then
5175 * *rlen is set to the length that remains unmapped.
5179 struct xfs_trans *tp, /* transaction pointer */
5180 struct xfs_inode *ip, /* incore inode */
5181 xfs_fileoff_t start, /* first file offset deleted */
5182 xfs_filblks_t *rlen, /* i/o: amount remaining */
5183 int flags, /* misc flags */
5184 xfs_extnum_t nexts) /* number of extents max */
5186 struct xfs_btree_cur *cur; /* bmap btree cursor */
5187 struct xfs_bmbt_irec del; /* extent being deleted */
5188 int error; /* error return value */
5189 xfs_extnum_t extno; /* extent number in list */
5190 struct xfs_bmbt_irec got; /* current extent record */
5191 struct xfs_ifork *ifp; /* inode fork pointer */
5192 int isrt; /* freeing in rt area */
5193 int logflags; /* transaction logging flags */
5194 xfs_extlen_t mod; /* rt extent offset */
5195 struct xfs_mount *mp; /* mount structure */
5196 int tmp_logflags; /* partial logging flags */
5197 int wasdel; /* was a delayed alloc extent */
5198 int whichfork; /* data or attribute fork */
5200 xfs_filblks_t len = *rlen; /* length to unmap in file */
5201 xfs_fileoff_t max_len;
5202 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5204 struct xfs_iext_cursor icur;
5207 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5209 whichfork = xfs_bmapi_whichfork(flags);
5210 ASSERT(whichfork != XFS_COW_FORK);
5211 ifp = XFS_IFORK_PTR(ip, whichfork);
5213 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5214 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5215 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5217 return -EFSCORRUPTED;
5220 if (XFS_FORCED_SHUTDOWN(mp))
5223 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5228 * Guesstimate how many blocks we can unmap without running the risk of
5229 * blowing out the transaction with a mix of EFIs and reflink
5230 * updates.
5232 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5233 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5237 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5238 (error = xfs_iread_extents(tp, ip, whichfork)))
5240 if (xfs_iext_count(ifp) == 0) {
5244 XFS_STATS_INC(mp, xs_blk_unmap);
5245 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5248 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5255 if (ifp->if_flags & XFS_IFBROOT) {
5256 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5257 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5258 cur->bc_private.b.flags = 0;
5264 * Synchronize by locking the bitmap inode.
5266 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5267 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5268 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5269 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5273 while (end != (xfs_fileoff_t)-1 && end >= start &&
5274 (nexts == 0 || extno < nexts) && max_len > 0) {
5276 * Is the found extent after a hole in which end lives?
5277 * Just back up to the previous extent, if so.
5279 if (got.br_startoff > end &&
5280 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5285 * Is the last block of this extent before the range
5286 * we're supposed to delete? If so, we're done.
5288 end = XFS_FILEOFF_MIN(end,
5289 got.br_startoff + got.br_blockcount - 1);
5293 * Then deal with the (possibly delayed) allocated space we found.
5297 wasdel = isnullstartblock(del.br_startblock);
5300 * Make sure we don't touch multiple AGF headers out of order
5301 * in a single transaction, as that could cause AB-BA deadlocks.
5304 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5305 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5309 if (got.br_startoff < start) {
5310 del.br_startoff = start;
5311 del.br_blockcount -= start - got.br_startoff;
5313 del.br_startblock += start - got.br_startoff;
5315 if (del.br_startoff + del.br_blockcount > end + 1)
5316 del.br_blockcount = end + 1 - del.br_startoff;
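/*
 * Illustrative sketch, not part of the original source: if got covers
 * file blocks [5, 15) while start = 8 and end = 11, the trimming above
 * leaves del with br_startoff = 8 and br_blockcount = 4, i.e. exactly
 * the overlap [8, 12) between the extent and the unmap range.
 */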
5318 /* How much can we safely unmap? */
5319 if (max_len < del.br_blockcount) {
5320 del.br_startoff += del.br_blockcount - max_len;
5322 del.br_startblock += del.br_blockcount - max_len;
5323 del.br_blockcount = max_len;
5329 sum = del.br_startblock + del.br_blockcount;
5330 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5333 * Realtime extent not lined up at the end.
5334 * The extent could have been split into written
5335 * and unwritten pieces, or we could just be
5336 * unmapping part of it. But we can't really
5337 * get rid of part of a realtime extent.
5339 if (del.br_state == XFS_EXT_UNWRITTEN) {
5341 * This piece is unwritten, or we're not
5342 * using unwritten extents. Skip over it.
5345 end -= mod > del.br_blockcount ?
5346 del.br_blockcount : mod;
5347 if (end < got.br_startoff &&
5348 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5355 * It's written, turn it unwritten.
5356 * This is better than zeroing it.
5358 ASSERT(del.br_state == XFS_EXT_NORM);
5359 ASSERT(tp->t_blk_res > 0);
5361 * If this spans a realtime extent boundary,
5362 * chop it back to the start of the one we end at.
5364 if (del.br_blockcount > mod) {
5365 del.br_startoff += del.br_blockcount - mod;
5366 del.br_startblock += del.br_blockcount - mod;
5367 del.br_blockcount = mod;
5369 del.br_state = XFS_EXT_UNWRITTEN;
5370 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5371 whichfork, &icur, &cur, &del,
5377 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5380 * Realtime extent is lined up at the end but not
5381 * at the front. We'll get rid of full extents if we can.
5384 mod = mp->m_sb.sb_rextsize - mod;
5385 if (del.br_blockcount > mod) {
5386 del.br_blockcount -= mod;
5387 del.br_startoff += mod;
5388 del.br_startblock += mod;
5389 } else if (del.br_startoff == start &&
5390 (del.br_state == XFS_EXT_UNWRITTEN ||
5391 tp->t_blk_res == 0)) {
5393 * Can't make it unwritten. There isn't
5394 * a full extent here so just skip it.
5396 ASSERT(end >= del.br_blockcount);
5397 end -= del.br_blockcount;
5398 if (got.br_startoff > end &&
5399 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5404 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5405 struct xfs_bmbt_irec prev;
5408 * This one is already unwritten.
5409 * It must have a written left neighbor.
5410 * Unwrite the killed part of that one and try again.
5413 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5415 ASSERT(prev.br_state == XFS_EXT_NORM);
5416 ASSERT(!isnullstartblock(prev.br_startblock));
5417 ASSERT(del.br_startblock ==
5418 prev.br_startblock + prev.br_blockcount);
5419 if (prev.br_startoff < start) {
5420 mod = start - prev.br_startoff;
5421 prev.br_blockcount -= mod;
5422 prev.br_startblock += mod;
5423 prev.br_startoff = start;
5425 prev.br_state = XFS_EXT_UNWRITTEN;
5426 error = xfs_bmap_add_extent_unwritten_real(tp,
5427 ip, whichfork, &icur, &cur,
5433 ASSERT(del.br_state == XFS_EXT_NORM);
5434 del.br_state = XFS_EXT_UNWRITTEN;
5435 error = xfs_bmap_add_extent_unwritten_real(tp,
5436 ip, whichfork, &icur, &cur,
5446 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5449 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5450 &del, &tmp_logflags, whichfork,
5452 logflags |= tmp_logflags;
5458 max_len -= del.br_blockcount;
5459 end = del.br_startoff - 1;
5462 * If not done go on to the next (previous) record.
5464 if (end != (xfs_fileoff_t)-1 && end >= start) {
5465 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5466 (got.br_startoff > end &&
5467 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5474 if (done || end == (xfs_fileoff_t)-1 || end < start)
5477 *rlen = end - start + 1;
5480 * Convert to a btree if necessary.
5482 if (xfs_bmap_needs_btree(ip, whichfork)) {
5483 ASSERT(cur == NULL);
5484 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5485 &tmp_logflags, whichfork);
5486 logflags |= tmp_logflags;
5488 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5494 * Log everything. Do this after conversion, there's no point in
5495 * logging the extent records if we've converted to btree format.
5497 if ((logflags & xfs_ilog_fext(whichfork)) &&
5498 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5499 logflags &= ~xfs_ilog_fext(whichfork);
5500 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5501 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5502 logflags &= ~xfs_ilog_fbroot(whichfork);
5504 * Log inode even in the error case, if the transaction
5505 * is dirty we'll need to shut down the filesystem.
5508 xfs_trans_log_inode(tp, ip, logflags);
5511 cur->bc_private.b.allocated = 0;
5512 xfs_btree_del_cursor(cur, error);
5517 /* Unmap a range of a file. */
5521 struct xfs_inode *ip,
5530 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5536 * Determine whether an extent shift can be accomplished by a merge with the
5537 * extent that precedes the target hole of the shift.
5541 struct xfs_bmbt_irec *left, /* preceding extent */
5542 struct xfs_bmbt_irec *got, /* current extent to shift */
5543 xfs_fileoff_t shift) /* shift fsb */
5545 xfs_fileoff_t startoff;
5547 startoff = got->br_startoff - shift;
5550 * The extent, once shifted, must be adjacent in-file and on-disk with
5551 * the preceding extent.
5553 if ((left->br_startoff + left->br_blockcount != startoff) ||
5554 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5555 (left->br_state != got->br_state) ||
5556 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
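/*
 * Illustrative sketch, not part of the original source: for
 * left = [0, 5) at startblock 100 and got = [7, 9) at startblock 105,
 * a shift of 2 satisfies all the adjacency checks above
 * (0 + 5 == 7 - 2, 100 + 5 == 105, equal br_state, 5 + 2 <= MAXEXTLEN),
 * so the shift can be done by merging into one extent [0, 7) at
 * startblock 100.
 */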
5563 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5564 * hole in the file. If an extent shift would result in the extent being fully
5565 * adjacent to the extent that currently precedes the hole, we can merge with
5566 * the preceding extent rather than do the shift.
5568 * This function assumes the caller has verified a shift-by-merge is possible
5569 * with the provided extents via xfs_bmse_can_merge().
5573 struct xfs_trans *tp,
5574 struct xfs_inode *ip,
5576 xfs_fileoff_t shift, /* shift fsb */
5577 struct xfs_iext_cursor *icur,
5578 struct xfs_bmbt_irec *got, /* extent to shift */
5579 struct xfs_bmbt_irec *left, /* preceding extent */
5580 struct xfs_btree_cur *cur,
5581 int *logflags) /* output */
5583 struct xfs_bmbt_irec new;
5584 xfs_filblks_t blockcount;
5586 struct xfs_mount *mp = ip->i_mount;
5588 blockcount = left->br_blockcount + got->br_blockcount;
5590 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5591 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5592 ASSERT(xfs_bmse_can_merge(left, got, shift));
5595 new.br_blockcount = blockcount;
5598 * Update the on-disk extent count, the btree if necessary and log the
5599 * inode.
5601 XFS_IFORK_NEXT_SET(ip, whichfork,
5602 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5603 *logflags |= XFS_ILOG_CORE;
5605 *logflags |= XFS_ILOG_DEXT;
5609 /* lookup and remove the extent to merge */
5610 error = xfs_bmbt_lookup_eq(cur, got, &i);
5613 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5615 error = xfs_btree_delete(cur, &i);
5618 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5620 /* lookup and update size of the previous extent */
5621 error = xfs_bmbt_lookup_eq(cur, left, &i);
5624 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5626 error = xfs_bmbt_update(cur, &new);
5630 /* change to extent format if required after extent removal */
5631 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5636 xfs_iext_remove(ip, icur, 0);
5637 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5638 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5641 /* update reverse mapping. rmap functions merge the rmaps for us */
5642 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5643 memcpy(&new, got, sizeof(new));
5644 new.br_startoff = left->br_startoff + left->br_blockcount;
5645 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5650 xfs_bmap_shift_update_extent(
5651 struct xfs_trans *tp,
5652 struct xfs_inode *ip,
5654 struct xfs_iext_cursor *icur,
5655 struct xfs_bmbt_irec *got,
5656 struct xfs_btree_cur *cur,
5658 xfs_fileoff_t startoff)
5660 struct xfs_mount *mp = ip->i_mount;
5661 struct xfs_bmbt_irec prev = *got;
5664 *logflags |= XFS_ILOG_CORE;
5666 got->br_startoff = startoff;
5669 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5672 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5674 error = xfs_bmbt_update(cur, got);
5678 *logflags |= XFS_ILOG_DEXT;
5681 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5684 /* update reverse mapping */
5685 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5686 xfs_rmap_map_extent(tp, ip, whichfork, got);
5691 xfs_bmap_collapse_extents(
5692 struct xfs_trans *tp,
5693 struct xfs_inode *ip,
5694 xfs_fileoff_t *next_fsb,
5695 xfs_fileoff_t offset_shift_fsb,
5698 int whichfork = XFS_DATA_FORK;
5699 struct xfs_mount *mp = ip->i_mount;
5700 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5701 struct xfs_btree_cur *cur = NULL;
5702 struct xfs_bmbt_irec got, prev;
5703 struct xfs_iext_cursor icur;
5704 xfs_fileoff_t new_startoff;
5708 if (unlikely(XFS_TEST_ERROR(
5709 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5710 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5711 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5712 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5713 return -EFSCORRUPTED;
5716 if (XFS_FORCED_SHUTDOWN(mp))
5719 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5721 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5722 error = xfs_iread_extents(tp, ip, whichfork);
5727 if (ifp->if_flags & XFS_IFBROOT) {
5728 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5729 cur->bc_private.b.flags = 0;
5732 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5736 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5739 new_startoff = got.br_startoff - offset_shift_fsb;
5740 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5741 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5746 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5747 error = xfs_bmse_merge(tp, ip, whichfork,
5748 offset_shift_fsb, &icur, &got, &prev,
5755 if (got.br_startoff < offset_shift_fsb) {
5761 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5762 cur, &logflags, new_startoff);
5767 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5772 *next_fsb = got.br_startoff;
5775 xfs_btree_del_cursor(cur, error);
5777 xfs_trans_log_inode(tp, ip, logflags);
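/*
 * Illustrative usage sketch, not part of the original source: callers
 * such as the fallocate collapse-range path walk the fork in a loop,
 * typically one transaction per iteration (start_fsb, shift_fsb and
 * done are hypothetical locals):
 *
 *	xfs_fileoff_t	next_fsb = start_fsb;
 *	bool		done = false;
 *
 *	while (!done) {
 *		// allocate and join a transaction, take the ILOCK, then:
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		// commit the transaction before the next pass
 *	}
 */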
5781 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5783 xfs_bmap_can_insert_extents(
5784 struct xfs_inode *ip,
5786 xfs_fileoff_t shift)
5788 struct xfs_bmbt_irec got;
5792 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5794 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5797 xfs_ilock(ip, XFS_ILOCK_EXCL);
5798 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5799 if (!error && !is_empty && got.br_startoff >= off &&
5800 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5802 xfs_iunlock(ip, XFS_ILOCK_EXCL);
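/*
 * Illustrative sketch, not part of the original source: if the last
 * extent starts at BMBT_STARTOFF_MASK - 1, a right shift of 4 wraps the
 * masked offset ((startoff + 4) & BMBT_STARTOFF_MASK == 2) below
 * br_startoff, so the check above rejects the request with -EINVAL
 * before any extents are moved.
 */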
5808 xfs_bmap_insert_extents(
5809 struct xfs_trans *tp,
5810 struct xfs_inode *ip,
5811 xfs_fileoff_t *next_fsb,
5812 xfs_fileoff_t offset_shift_fsb,
5814 xfs_fileoff_t stop_fsb)
5816 int whichfork = XFS_DATA_FORK;
5817 struct xfs_mount *mp = ip->i_mount;
5818 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5819 struct xfs_btree_cur *cur = NULL;
5820 struct xfs_bmbt_irec got, next;
5821 struct xfs_iext_cursor icur;
5822 xfs_fileoff_t new_startoff;
5826 if (unlikely(XFS_TEST_ERROR(
5827 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5828 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5829 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5830 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5831 return -EFSCORRUPTED;
5834 if (XFS_FORCED_SHUTDOWN(mp))
5837 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5839 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5840 error = xfs_iread_extents(tp, ip, whichfork);
5845 if (ifp->if_flags & XFS_IFBROOT) {
5846 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5847 cur->bc_private.b.flags = 0;
5850 if (*next_fsb == NULLFSBLOCK) {
5851 xfs_iext_last(ifp, &icur);
5852 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5853 stop_fsb > got.br_startoff) {
5858 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5863 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5866 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
5871 new_startoff = got.br_startoff + offset_shift_fsb;
5872 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5873 if (new_startoff + got.br_blockcount > next.br_startoff) {
5879 * Unlike a left shift (which involves a hole punch), a right
5880 * shift does not modify extent neighbors in any way. We should
5881 * never find mergeable extents in this scenario. Check anyway
5882 * and warn if we encounter two extents that could be one.
5884 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5888 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5889 cur, &logflags, new_startoff);
5893 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5894 stop_fsb >= got.br_startoff + got.br_blockcount) {
5899 *next_fsb = got.br_startoff;
5902 xfs_btree_del_cursor(cur, error);
5904 xfs_trans_log_inode(tp, ip, logflags);
5909 * Splits an extent into two extents at the block split_fsb, so that
5910 * split_fsb becomes the first block of the new (second) extent. If
5911 * split_fsb lies in a hole or at the first block of an existing extent,
5912 * there is nothing to split and we just return 0.
5915 xfs_bmap_split_extent_at(
5916 struct xfs_trans *tp,
5917 struct xfs_inode *ip,
5918 xfs_fileoff_t split_fsb)
5920 int whichfork = XFS_DATA_FORK;
5921 struct xfs_btree_cur *cur = NULL;
5922 struct xfs_bmbt_irec got;
5923 struct xfs_bmbt_irec new; /* split extent */
5924 struct xfs_mount *mp = ip->i_mount;
5925 struct xfs_ifork *ifp;
5926 xfs_fsblock_t gotblkcnt; /* new block count for got */
5927 struct xfs_iext_cursor icur;
5932 if (unlikely(XFS_TEST_ERROR(
5933 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5934 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5935 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5936 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5937 XFS_ERRLEVEL_LOW, mp);
5938 return -EFSCORRUPTED;
5941 if (XFS_FORCED_SHUTDOWN(mp))
5944 ifp = XFS_IFORK_PTR(ip, whichfork);
5945 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5946 /* Read in all the extents */
5947 error = xfs_iread_extents(tp, ip, whichfork);
5953 * If there are no extents, or split_fsb lies in a hole, we are done.
5955 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5956 got.br_startoff >= split_fsb)
5959 gotblkcnt = split_fsb - got.br_startoff;
5960 new.br_startoff = split_fsb;
5961 new.br_startblock = got.br_startblock + gotblkcnt;
5962 new.br_blockcount = got.br_blockcount - gotblkcnt;
5963 new.br_state = got.br_state;
5965 if (ifp->if_flags & XFS_IFBROOT) {
5966 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5967 cur->bc_private.b.flags = 0;
5968 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5971 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5974 got.br_blockcount = gotblkcnt;
5975 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5978 logflags = XFS_ILOG_CORE;
5980 error = xfs_bmbt_update(cur, &got);
5984 logflags |= XFS_ILOG_DEXT;
5986 /* Add new extent */
5987 xfs_iext_next(ifp, &icur);
5988 xfs_iext_insert(ip, &icur, &new, 0);
5989 XFS_IFORK_NEXT_SET(ip, whichfork,
5990 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5993 error = xfs_bmbt_lookup_eq(cur, &new, &i);
5996 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5997 error = xfs_btree_insert(cur, &i);
6000 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6004 * Convert to a btree if necessary.
6006 if (xfs_bmap_needs_btree(ip, whichfork)) {
6007 int tmp_logflags; /* partial log flag return val */
6009 ASSERT(cur == NULL);
6010 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6011 &tmp_logflags, whichfork);
6012 logflags |= tmp_logflags;
6017 cur->bc_private.b.allocated = 0;
6018 xfs_btree_del_cursor(cur, error);
6022 xfs_trans_log_inode(tp, ip, logflags);
6027 xfs_bmap_split_extent(
6028 struct xfs_inode *ip,
6029 xfs_fileoff_t split_fsb)
6031 struct xfs_mount *mp = ip->i_mount;
6032 struct xfs_trans *tp;
6035 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6036 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6040 xfs_ilock(ip, XFS_ILOCK_EXCL);
6041 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6043 error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
6047 return xfs_trans_commit(tp);
6050 xfs_trans_cancel(tp);
6054 /* Deferred mapping is only for real extents in the data fork. */
6056 xfs_bmap_is_update_needed(
6057 struct xfs_bmbt_irec *bmap)
6059 return bmap->br_startblock != HOLESTARTBLOCK &&
6060 bmap->br_startblock != DELAYSTARTBLOCK;
6063 /* Record a bmap intent. */
6066 struct xfs_trans *tp,
6067 enum xfs_bmap_intent_type type,
6068 struct xfs_inode *ip,
6070 struct xfs_bmbt_irec *bmap)
6072 struct xfs_bmap_intent *bi;
6074 trace_xfs_bmap_defer(tp->t_mountp,
6075 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6077 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6078 ip->i_ino, whichfork,
6080 bmap->br_blockcount,
6083 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
6084 INIT_LIST_HEAD(&bi->bi_list);
6087 bi->bi_whichfork = whichfork;
6088 bi->bi_bmap = *bmap;
6090 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6094 /* Map an extent into a file. */
6096 xfs_bmap_map_extent(
6097 struct xfs_trans *tp,
6098 struct xfs_inode *ip,
6099 struct xfs_bmbt_irec *PREV)
6101 if (!xfs_bmap_is_update_needed(PREV))
6104 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6107 /* Unmap an extent out of a file. */
6109 xfs_bmap_unmap_extent(
6110 struct xfs_trans *tp,
6111 struct xfs_inode *ip,
6112 struct xfs_bmbt_irec *PREV)
6114 if (!xfs_bmap_is_update_needed(PREV))
6117 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
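/*
 * Illustrative usage sketch, not part of the original source: a caller
 * exchanging mappings could log both halves as intents so that each is
 * completed in its own transaction via xfs_bmap_finish_one():
 *
 *	xfs_bmap_unmap_extent(tp, ip, &old_irec);	// defer the unmap
 *	xfs_bmap_map_extent(tp, ip, &new_irec);		// defer the map
 *
 * old_irec and new_irec are hypothetical; holes and delalloc extents
 * are filtered out by xfs_bmap_is_update_needed() above.
 */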
6121 * Process one of the deferred bmap operations. We pass back the
6122 * btree cursor to maintain our lock on the bmapbt between calls.
6125 xfs_bmap_finish_one(
6126 struct xfs_trans *tp,
6127 struct xfs_inode *ip,
6128 enum xfs_bmap_intent_type type,
6130 xfs_fileoff_t startoff,
6131 xfs_fsblock_t startblock,
6132 xfs_filblks_t *blockcount,
6137 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6139 trace_xfs_bmap_deferred(tp->t_mountp,
6140 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6141 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6142 ip->i_ino, whichfork, startoff, *blockcount, state);
6144 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6145 return -EFSCORRUPTED;
6147 if (XFS_TEST_ERROR(false, tp->t_mountp,
6148 XFS_ERRTAG_BMAP_FINISH_ONE))
6153 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6157 case XFS_BMAP_UNMAP:
6158 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6159 XFS_BMAPI_REMAP, 1);
6163 error = -EFSCORRUPTED;
6169 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6171 xfs_bmap_validate_extent(
6172 struct xfs_inode *ip,
6174 struct xfs_bmbt_irec *irec)
6176 struct xfs_mount *mp = ip->i_mount;
6177 xfs_fsblock_t endfsb;
6180 isrt = XFS_IS_REALTIME_INODE(ip);
6181 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6183 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6184 return __this_address;
6185 if (!xfs_verify_rtbno(mp, endfsb))
6186 return __this_address;
6188 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6189 return __this_address;
6190 if (!xfs_verify_fsbno(mp, endfsb))
6191 return __this_address;
6192 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6193 XFS_FSB_TO_AGNO(mp, endfsb))
6194 return __this_address;
6196 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6197 return __this_address;