// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals that are not related to tags; these must be < 0. */
	XFS_ICWALK_DQRELE	= -1,

	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
};

#define XFS_ICWALK_NULL_TAG	(-1U)

/* Compute the inode radix tree tag for this goal. */
static inline unsigned int
xfs_icwalk_tag(enum xfs_icwalk_goal goal)
{
	return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
}
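
/*
 * For example, XFS_ICWALK_DQRELE (-1) maps to XFS_ICWALK_NULL_TAG, so a
 * dqrele walk uses an untagged radix tree lookup and visits every incore
 * inode, whereas XFS_ICWALK_BLOCKGC maps straight to XFS_ICI_BLOCKGC_TAG
 * and only visits inodes tagged for block garbage collection.
 */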

static int xfs_icwalk(struct xfs_mount *mp, int iter_flags,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, enum xfs_icwalk_goal goal);
static int xfs_icwalk_ag(struct xfs_perag *pag, int iter_flags,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, enum xfs_icwalk_goal goal);

/*
 * Private inode cache walk flags for struct xfs_eofblocks.  Must not coincide
 * with XFS_EOF_FLAGS_*.
 */
#define XFS_ICWALK_FLAG_DROP_UDQUOT	(1U << 31)
#define XFS_ICWALK_FLAG_DROP_GDQUOT	(1U << 30)
#define XFS_ICWALK_FLAG_DROP_PDQUOT	(1U << 29)

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_DROP_UDQUOT | \
					 XFS_ICWALK_FLAG_DROP_GDQUOT | \
					 XFS_ICWALK_FLAG_DROP_PDQUOT)
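
/*
 * These private flags occupy the top bits of the 32-bit flags word so that
 * they cannot collide with the XFS_EOF_FLAGS_* values, which are assigned
 * from bit 0 upwards; the BUILD_BUG_ON() at the bottom of this file checks
 * that the two sets never overlap.
 */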

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
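
/*
 * For example, with the default xfs_syncd_centisecs of 3000 (a 30 second
 * sync period), the expression above queues reclaim work every
 * 3000 / 6 * 10 = 5000 milliseconds, i.e. at 1/6 of the sync period.
 */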

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

static void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into useable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool	wake;

			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
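
/*
 * A typical caller, sketched here for illustration only (error handling
 * trimmed):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	... operate on ip with the ILOCK held ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */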
728 * "Is this a cached inode that's also allocated?"
730 * Look up an inode by number in the given file system. If the inode is
731 * in cache and isn't in purgatory, return 1 if the inode is allocated
732 * and 0 if it is not. For all other cases (not in cache, being torn
733 * down, etc.), return a negative error code.
735 * The caller has to prevent inode allocation and freeing activity,
736 * presumably by locking the AGI buffer. This is to ensure that an
737 * inode cannot transition from allocated to freed until the caller is
738 * ready to allow that. If the inode is in an intermediate state (new,
739 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
740 * inode is not in the cache, -ENOENT will be returned. The caller must
741 * deal with these scenarios appropriately.
743 * This is a specialized use case for the online scrubber; if you're
744 * reading this, you probably want xfs_iget.
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}
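
/*
 * Illustrative call sequence (assumes the caller already holds the AGI
 * buffer locked so that @ino cannot change allocation state):
 *
 *	bool			inuse;
 *	int			error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (error == -ENOENT)
 *		... not in cache, check the on-disk inode btree instead ...
 *	else if (error == -EAGAIN)
 *		... inode is in a transient state, try again later ...
 */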

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 *
 * XXX: This will be moved closer to xfs_icwalk* once we get rid of the
 * separate reclaim walk functions.
 */
#define XFS_LOOKUP_BATCH	32
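
/*
 * At 32 entries, the on-stack batch array used by the walkers below costs
 * 32 * sizeof(struct xfs_inode *) = 256 bytes on a 64-bit kernel.
 */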

#ifdef CONFIG_XFS_QUOTA
/* Drop this inode's dquots. */
static int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	void			*priv)
{
	struct xfs_eofblocks	*eofb = priv;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Detach all dquots from incore inodes if we can.  The caller must already
 * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
 * not get reattached.
 */
int
xfs_dqrele_all_inodes(
	struct xfs_mount	*mp,
	unsigned int		qflags)
{
	struct xfs_eofblocks	eofb = { .eof_flags = 0 };

	if (qflags & XFS_UQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
	if (qflags & XFS_GQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
	if (qflags & XFS_PQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;

	return xfs_icwalk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
			&eofb, XFS_ICWALK_DQRELE);
}
#endif /* CONFIG_XFS_QUOTA */

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all the
 * unreclaimed inodes.
 *
 * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
 * so that callers that want to block until all dirty inodes are written back
 * and reclaimed can sanely loop.
 */
static void
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || !xfs_reclaim_inode_grab(ip))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (batch[i])
					xfs_reclaim_inode(batch[i], pag);
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;
			cond_resched();
		} while (nr_found && !done && *nr_to_scan > 0);

		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
		xfs_perag_put(pag);
	}
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	int			nr_to_scan = INT_MAX;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

static bool
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid != eofb->eof_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
static bool
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid == eofb->eof_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @eofb?  The inode is eligible if @eofb is null or
 * if the predicate functions match.
 */
static bool
xfs_inode_matches_eofb(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	bool			match;

	if (!eofb)
		return true;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return false;

	return true;
}
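
/*
 * For example, an @eofb with both XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID set
 * only matches inodes owned by that uid *and* that gid by default, whereas
 * adding XFS_EOF_FLAGS_UNION matches inodes owned by either of them.
 */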

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);
	int		nr_to_scan = INT_MAX;

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_gc_workqueue,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	int			tagged;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_BLOCKGC_TAG);
	if (!tagged) {
		/* propagate the blockgc tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_blockgc_queue(pag);

		trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_BLOCKGC_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
		/* clear the blockgc tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;
	int			ret = 0;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

#define for_each_perag_tag(mp, next_agno, pag, tag) \
	for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
		(pag) != NULL; \
		(next_agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
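
/*
 * Note that the loop increment above drops the reference on @pag, so a loop
 * body that exits early must put the perag itself, e.g.:
 *
 *	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) {
 *		if (need_to_stop) {
 *			xfs_perag_put(pag);
 *			break;
 *		}
 *	}
 *
 * where need_to_stop stands in for whatever early-exit condition applies.
 */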

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/*
 * Decide if the given @ip is eligible to be a part of the inode walk, and
 * grab it if so.  Returns true if it's ready to go or false if we should just
 * ignore it.
 */
static bool
xfs_inode_walk_ag_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	void			*args)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, args, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, args, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	if (!sb_start_write_trylock(mp->m_super))
		return;
	error = xfs_icwalk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
			XFS_ICWALK_BLOCKGC);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	sb_end_write(mp->m_super);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging eofblocks and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);

	return xfs_icwalk(mp, 0, xfs_blockgc_scan_inode, eofb,
			XFS_ICWALK_BLOCKGC);
}

/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		eof_flags)
{
	struct xfs_eofblocks	eofb = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		eofb.eof_prid = pdqp->q_id;
		eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &eofb);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		eof_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
}
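
/*
 * Sketch of the intended caller pattern, for illustration only: a write path
 * that fails a quota reservation with -EDQUOT or -ENOSPC can run one of
 * these scans and then retry the reservation once, e.g.:
 *
 *	error = ... reserve quota/space ...;
 *	if (error == -EDQUOT || error == -ENOSPC) {
 *		error = xfs_blockgc_free_quota(ip, 0);
 *		if (!error)
 *			error = ... retry the reservation ...;
 *	}
 */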

/* XFS Inode Cache Walking Code */

/*
 * For a given per-AG structure @pag, grab, @execute, and rele all incore
 * inodes with the given radix tree @tag.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	int			iter_flags,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	enum xfs_icwalk_goal	goal)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		unsigned int	tag = xfs_icwalk_tag(goal);
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == XFS_ICWALK_NULL_TAG)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Fetch the next (possibly tagged) per-AG structure. */
static inline struct xfs_perag *
xfs_icwalk_get_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	enum xfs_icwalk_goal	goal)
{
	unsigned int		tag = xfs_icwalk_tag(goal);

	if (tag == XFS_ICWALK_NULL_TAG)
		return xfs_perag_get(mp, agno);
	return xfs_perag_get_tag(mp, agno, tag);
}

/*
 * Call the @execute function on all incore inodes matching the radix tree
 * @tag.
 */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	int			iter_flags,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	enum xfs_icwalk_goal	goal)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno = 0;

	while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
		agno = pag->pag_agno + 1;
		error = xfs_icwalk_ag(pag, iter_flags, execute, args, goal);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);
}