// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"

#include <linux/iversion.h>
static int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, int tag);
static int xfs_inode_walk_ag(struct xfs_perag *pag, int iter_flags,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, int tag);
/*
 * Private inode cache walk flags for struct xfs_eofblocks.  Must not coincide
 * with XFS_EOF_FLAGS_*.
 */
#define XFS_ICWALK_FLAG_DROP_UDQUOT	(1U << 31)
#define XFS_ICWALK_FLAG_DROP_GDQUOT	(1U << 30)
#define XFS_ICWALK_FLAG_DROP_PDQUOT	(1U << 29)

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_DROP_UDQUOT | \
					 XFS_ICWALK_FLAG_DROP_GDQUOT | \
					 XFS_ICWALK_FLAG_DROP_PDQUOT)
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
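
/*
 * Illustrative sketch (assumed wiring, not part of this file): the delayed
 * work item queued above is expected to be initialised during mount setup so
 * that it calls back into xfs_reclaim_worker() below, e.g.:
 *
 *	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
 *
 * xfs_reclaim_worker() then requeues itself through this function, which is
 * what produces the periodic background reclaim pass.
 */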
static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}
static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}
/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool	wake;

			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
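
/*
 * Illustrative sketch of a typical xfs_iget() call from a metadata operation;
 * this is not a caller in this file, and "mp", "tp" and "ino" are assumed to
 * come from the caller's context:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	... operate on ip, which is returned with the ILOCK held ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */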
700 * "Is this a cached inode that's also allocated?"
702 * Look up an inode by number in the given file system. If the inode is
703 * in cache and isn't in purgatory, return 1 if the inode is allocated
704 * and 0 if it is not. For all other cases (not in cache, being torn
705 * down, etc.), return a negative error code.
707 * The caller has to prevent inode allocation and freeing activity,
708 * presumably by locking the AGI buffer. This is to ensure that an
709 * inode cannot transition from allocated to freed until the caller is
710 * ready to allow that. If the inode is in an intermediate state (new,
711 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
712 * inode is not in the cache, -ENOENT will be returned. The caller must
713 * deal with these scenarios appropriately.
715 * This is a specialized use case for the online scrubber; if you're
716 * reading this, you probably want xfs_iget.
719 xfs_icache_inode_is_allocated(
720 struct xfs_mount *mp,
721 struct xfs_trans *tp,
725 struct xfs_inode *ip;
728 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
732 *inuse = !!(VFS_I(ip)->i_mode);
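
/*
 * Illustrative sketch of the intended calling pattern (assumed; the real
 * caller lives in the online scrub code).  With the AGI buffer held to
 * freeze allocation state:
 *
 *	bool	inuse;
 *	int	error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (error == -ENOENT || error == -EAGAIN)
 *		... inode not cached or in flux; decide by other means ...
 */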
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 *
 * XXX: This will be moved closer to xfs_inode_walk* once we get rid of the
 * separate reclaim walk functions.
 */
#define XFS_LOOKUP_BATCH	32
#ifdef CONFIG_XFS_QUOTA
/* Drop this inode's dquots. */
static int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	void			*priv)
{
	struct xfs_eofblocks	*eofb = priv;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Detach all dquots from incore inodes if we can.  The caller must already
 * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
 * not get reattached.
 */
int
xfs_dqrele_all_inodes(
	struct xfs_mount	*mp,
	unsigned int		qflags)
{
	struct xfs_eofblocks	eofb = { .eof_flags = 0 };

	if (qflags & XFS_UQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
	if (qflags & XFS_GQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
	if (qflags & XFS_PQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;

	return xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
			&eofb, XFS_ICI_NO_TAG);
}
#endif /* CONFIG_XFS_QUOTA */
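
/*
 * Illustrative sketch of the expected quotaoff calling convention (assumed;
 * the real caller lives in the quota code): clear the ACTIVE flags first so
 * inodes cannot reattach dquots, then detach whatever is still referenced:
 *
 *	mp->m_qflags &= ~(XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE |
 *			  XFS_PQUOTA_ACTIVE);
 *	error = xfs_dqrele_all_inodes(mp, XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT |
 *			XFS_PQUOTA_ACCT);
 */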
/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}
/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 *
 * A single pass is not guaranteed to reclaim every tagged inode; callers
 * that want to block until all dirty inodes are written back and reclaimed
 * must loop while the reclaim tag remains set (see xfs_reclaim_inodes).
 */
static void
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || !xfs_reclaim_inode_grab(ip))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (batch[i])
					xfs_reclaim_inode(batch[i], pag);
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;
			cond_resched();
		} while (nr_found && !done && *nr_to_scan > 0);

		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
		xfs_perag_put(pag);
	}
}
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	int			nr_to_scan = INT_MAX;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	}
}
/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	return 0;
}
/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
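
/*
 * Sketch of how the two functions above are expected to be driven by the VFS
 * shrinker hooks (assumed wiring; the real glue lives in the
 * super_operations set up by xfs_super.c):
 *
 *	static long
 *	xfs_fs_nr_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_count(XFS_M(sb));
 *	}
 *
 *	static long
 *	xfs_fs_free_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 */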
STATIC bool
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid != eofb->eof_prid)
		return false;

	return true;
}
/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid == eofb->eof_prid)
		return true;

	return false;
}
/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @eofb?  The inode is eligible if @eofb is null or
 * if the predicate functions match.
 */
static bool
xfs_inode_matches_eofb(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	bool			match;

	if (!eofb)
		return true;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return false;

	return true;
}
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);
	int		nr_to_scan = INT_MAX;

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	xfs_reclaim_work_queue(mp);
}
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}
/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_gc_workqueue,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}
static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	int			tagged;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_BLOCKGC_TAG);
	if (!tagged) {
		/* propagate the blockgc tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_blockgc_queue(pag);

		trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}
static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_BLOCKGC_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
		/* clear the blockgc tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}
/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;
	int			ret = 0;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}
void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
#define for_each_perag_tag(mp, next_agno, pag, tag) \
	for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
		(pag) != NULL; \
		(next_agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}
/*
 * Decide if the given @ip is eligible to be a part of the inode walk, and
 * grab it if so.  Returns true if it's ready to go or false if we should just
 * ignore it.
 */
static bool
xfs_inode_walk_ag_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}
/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	void			*args)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, args, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, args, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return error;
}
/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	if (!sb_start_write_trylock(mp->m_super))
		return;
	error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
			XFS_ICI_BLOCKGC_TAG);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	sb_end_write(mp->m_super);
	xfs_blockgc_queue(pag);
}
/*
 * Try to free space in the filesystem by purging eofblocks and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);

	return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
			XFS_ICI_BLOCKGC_TAG);
}
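
/*
 * Illustrative sketch of building a filter for the function above (values
 * assumed): free the speculative preallocations owned by a given uid,
 * waiting on busy inodes rather than skipping them:
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC;
 *	eofb.eof_uid = uid;		(a kuid_t from the caller)
 *	error = xfs_blockgc_free_space(mp, &eofb);
 *
 * A NULL @eofb matches every inode tagged for blockgc.
 */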
/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		eof_flags)
{
	struct xfs_eofblocks	eofb = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		eofb.eof_prid = pdqp->q_id;
		eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &eofb);
}
/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		eof_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
}
/* XFS Inode Cache Walking Code */

/*
 * For a given per-AG structure @pag, grab, @execute, and rele all incore
 * inodes with the given radix tree @tag.
 */
static int
xfs_inode_walk_ag(
	struct xfs_perag	*pag,
	int			iter_flags,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	int			tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == XFS_ICI_NO_TAG)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
/* Fetch the next (possibly tagged) per-AG structure. */
static inline struct xfs_perag *
xfs_inode_walk_get_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	int			tag)
{
	if (tag == XFS_ICI_NO_TAG)
		return xfs_perag_get(mp, agno);
	return xfs_perag_get_tag(mp, agno, tag);
}
/*
 * Call the @execute function on all incore inodes matching the radix tree
 * @tag.
 */
static int
xfs_inode_walk(
	struct xfs_mount	*mp,
	int			iter_flags,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno = 0;

	/*
	 * The private walk flags must never overlap with the flags that
	 * callers can pass in via struct xfs_eofblocks.
	 */
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);

	while ((pag = xfs_inode_walk_get_perag(mp, agno, tag))) {
		agno = pag->pag_agno + 1;
		error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}
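
/*
 * Illustrative sketch of the walk API above (the callback name is an
 * assumption for demonstration): count every cached inode by walking with
 * no tag filter:
 *
 *	static int
 *	xfs_count_one_inode(struct xfs_inode *ip, void *args)
 *	{
 *		(*(unsigned long *)args)++;
 *		return 0;
 *	}
 *
 *	unsigned long	count = 0;
 *	int		error;
 *
 *	error = xfs_inode_walk(mp, 0, xfs_count_one_inode, &count,
 *			XFS_ICI_NO_TAG);
 */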