1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_inode_item.h"
18 #include "xfs_quota.h"
19 #include "xfs_trace.h"
20 #include "xfs_icache.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_dquot_item.h"
23 #include "xfs_dquot.h"
24 #include "xfs_reflink.h"
25 #include "xfs_ialloc.h"
27 #include <linux/iversion.h>
29 /* Radix tree tags for incore inode tree. */
31 /* inode is to be reclaimed */
32 #define XFS_ICI_RECLAIM_TAG 0
33 /* Inode has speculative preallocations (posteof or cow) to clean. */
34 #define XFS_ICI_BLOCKGC_TAG 1
37 * The goal for walking incore inodes. These can correspond with incore inode
38 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
40 enum xfs_icwalk_goal {
41 /* Goals that are not related to tags; these must be < 0. */
42 XFS_ICWALK_DQRELE = -1,
44 /* Goals directly associated with tagged inodes. */
45 XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
48 #define XFS_ICWALK_NULL_TAG (-1U)
50 /* Compute the inode radix tree tag for this goal. */
51 static inline unsigned int
52 xfs_icwalk_tag(enum xfs_icwalk_goal goal)
54 return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
57 static int xfs_icwalk(struct xfs_mount *mp,
58 enum xfs_icwalk_goal goal, void *args);
59 static int xfs_icwalk_ag(struct xfs_perag *pag,
60 enum xfs_icwalk_goal goal, void *args);
63 * Private inode cache walk flags for struct xfs_eofblocks. Must not coincide
64 * with XFS_EOF_FLAGS_*.
66 #define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31)
67 #define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30)
68 #define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29)
70 #define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \
71 XFS_ICWALK_FLAG_DROP_GDQUOT | \
72 XFS_ICWALK_FLAG_DROP_PDQUOT)
75 * Allocate and initialise an xfs_inode.
85 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
86 * and return NULL here on ENOMEM.
88 ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
90 if (inode_init_always(mp->m_super, VFS_I(ip))) {
91 kmem_cache_free(xfs_inode_zone, ip);
95 /* VFS doesn't initialise i_mode! */
96 VFS_I(ip)->i_mode = 0;
98 XFS_STATS_INC(mp, vn_active);
99 ASSERT(atomic_read(&ip->i_pincount) == 0);
100 ASSERT(ip->i_ino == 0);
102 /* initialise the xfs inode */
105 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
108 memset(&ip->i_df, 0, sizeof(ip->i_df));
110 ip->i_delayed_blks = 0;
111 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
116 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
117 INIT_LIST_HEAD(&ip->i_ioend_list);
118 spin_lock_init(&ip->i_ioend_lock);
124 xfs_inode_free_callback(
125 struct rcu_head *head)
127 struct inode *inode = container_of(head, struct inode, i_rcu);
128 struct xfs_inode *ip = XFS_I(inode);
130 switch (VFS_I(ip)->i_mode & S_IFMT) {
134 xfs_idestroy_fork(&ip->i_df);
139 xfs_idestroy_fork(ip->i_afp);
140 kmem_cache_free(xfs_ifork_zone, ip->i_afp);
143 xfs_idestroy_fork(ip->i_cowfp);
144 kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
147 ASSERT(!test_bit(XFS_LI_IN_AIL,
148 &ip->i_itemp->ili_item.li_flags));
149 xfs_inode_item_destroy(ip);
153 kmem_cache_free(xfs_inode_zone, ip);
158 struct xfs_inode *ip)
160 /* asserts to verify all state is correct here */
161 ASSERT(atomic_read(&ip->i_pincount) == 0);
162 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
163 XFS_STATS_DEC(ip->i_mount, vn_active);
165 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
170 struct xfs_inode *ip)
172 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
175 * Because we use RCU freeing we need to ensure the inode always
176 * appears to be reclaimed with an invalid inode number when in the
177 * free state. The ip->i_flags_lock provides the barrier against lookup races.
180 spin_lock(&ip->i_flags_lock);
181 ip->i_flags = XFS_IRECLAIM;
183 spin_unlock(&ip->i_flags_lock);
185 __xfs_inode_free(ip);
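/*
 * Sketch of the lookup side of the scheme described above, as used by
 * xfs_iget_cache_hit() later in this file: an RCU-protected lookup must
 * re-check the inode number under i_flags_lock before trusting anything it
 * pulled out of the radix tree, because the object may have been freed and
 * reused within the same grace period.
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != ino) {
 *			// stale or reused inode: skip and retry
 *		}
 *	}
 */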
189 * Queue background inode reclaim work if there are reclaimable inodes and there
190 * isn't reclaim work already scheduled or in progress.
193 xfs_reclaim_work_queue(
194 struct xfs_mount *mp)
198 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
199 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
200 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
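/*
 * Worked example of the delay above, assuming the default sysctl value of
 * xfs_syncd_centisecs = 3000 (30 seconds): 3000 / 6 * 10 = 5000ms, so
 * background reclaim reschedules itself roughly every 5 seconds.
 */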
206 xfs_perag_set_reclaim_tag(
207 struct xfs_perag *pag)
209 struct xfs_mount *mp = pag->pag_mount;
211 lockdep_assert_held(&pag->pag_ici_lock);
212 if (pag->pag_ici_reclaimable++)
215 /* propagate the reclaim tag up into the perag radix tree */
216 spin_lock(&mp->m_perag_lock);
217 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
218 XFS_ICI_RECLAIM_TAG);
219 spin_unlock(&mp->m_perag_lock);
221 /* schedule periodic background inode reclaim */
222 xfs_reclaim_work_queue(mp);
224 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
228 xfs_perag_clear_reclaim_tag(
229 struct xfs_perag *pag)
231 struct xfs_mount *mp = pag->pag_mount;
233 lockdep_assert_held(&pag->pag_ici_lock);
234 if (--pag->pag_ici_reclaimable)
237 /* clear the reclaim tag from the perag radix tree */
238 spin_lock(&mp->m_perag_lock);
239 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
240 XFS_ICI_RECLAIM_TAG);
241 spin_unlock(&mp->m_perag_lock);
242 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
247 * We set the inode flag atomically with the radix tree tag.
248 * Once we get tag lookups on the radix tree, this inode flag can go away.
252 xfs_inode_set_reclaim_tag(
253 struct xfs_inode *ip)
255 struct xfs_mount *mp = ip->i_mount;
256 struct xfs_perag *pag;
258 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
259 spin_lock(&pag->pag_ici_lock);
260 spin_lock(&ip->i_flags_lock);
262 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
263 XFS_ICI_RECLAIM_TAG);
264 xfs_perag_set_reclaim_tag(pag);
265 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
267 spin_unlock(&ip->i_flags_lock);
268 spin_unlock(&pag->pag_ici_lock);
273 xfs_inode_clear_reclaim_tag(
274 struct xfs_perag *pag,
277 radix_tree_tag_clear(&pag->pag_ici_root,
278 XFS_INO_TO_AGINO(pag->pag_mount, ino),
279 XFS_ICI_RECLAIM_TAG);
280 xfs_perag_clear_reclaim_tag(pag);
285 struct xfs_inode *ip)
287 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
288 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
291 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
292 if (!xfs_iflags_test(ip, XFS_INEW))
296 finish_wait(wq, &wait.wq_entry);
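/*
 * The wait above relies on a matching waker: whoever clears XFS_INEW must
 * also call wake_up_bit() on the same flag word, e.g. (sketch of the waker
 * side, as in the recycling-failure path of xfs_iget_cache_hit() below):
 *
 *	ip->i_flags &= ~XFS_INEW;
 *	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 *
 * Without the wake_up_bit() the waiter here could sleep indefinitely.
 */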
300 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
301 * part of the structure. This is made more complex by the fact we store
302 * information about the on-disk values in the VFS inode and so we can't just
303 * overwrite the values unconditionally. Hence we save the parameters we
304 * need to retain across reinitialisation, and rewrite them into the VFS inode
305 * after reinitialisation even if it fails.
309 struct xfs_mount *mp,
313 uint32_t nlink = inode->i_nlink;
314 uint32_t generation = inode->i_generation;
315 uint64_t version = inode_peek_iversion(inode);
316 umode_t mode = inode->i_mode;
317 dev_t dev = inode->i_rdev;
318 kuid_t uid = inode->i_uid;
319 kgid_t gid = inode->i_gid;
321 error = inode_init_always(mp->m_super, inode);
323 set_nlink(inode, nlink);
324 inode->i_generation = generation;
325 inode_set_iversion_queried(inode, version);
326 inode->i_mode = mode;
334 * If we are allocating a new inode, then check what was returned is
335 * actually a free, empty inode. If we are not allocating an inode,
336 * then check we didn't find a free inode.
339 * 0 if the inode free state matches the lookup context
340 * -ENOENT if the inode is free and we are not allocating
341 * -EFSCORRUPTED if there is any state mismatch at all
344 xfs_iget_check_free_state(
345 struct xfs_inode *ip,
348 if (flags & XFS_IGET_CREATE) {
349 /* should be a free inode */
350 if (VFS_I(ip)->i_mode != 0) {
351 xfs_warn(ip->i_mount,
352 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
353 ip->i_ino, VFS_I(ip)->i_mode);
354 return -EFSCORRUPTED;
357 if (ip->i_nblocks != 0) {
358 xfs_warn(ip->i_mount,
359 "Corruption detected! Free inode 0x%llx has blocks allocated!",
361 return -EFSCORRUPTED;
366 /* should be an allocated inode */
367 if (VFS_I(ip)->i_mode == 0)
374 * Check the validity of the inode we just found in the cache.
378 struct xfs_perag *pag,
379 struct xfs_inode *ip,
382 int lock_flags) __releases(RCU)
384 struct inode *inode = VFS_I(ip);
385 struct xfs_mount *mp = ip->i_mount;
389 * check for re-use of an inode within an RCU grace period due to the
390 * radix tree nodes not being updated yet. We monitor for this by
391 * setting the inode number to zero before freeing the inode structure.
392 * If the inode has been reallocated and set up, then the inode number
393 * will not match, so check for that, too.
395 spin_lock(&ip->i_flags_lock);
396 if (ip->i_ino != ino) {
397 trace_xfs_iget_skip(ip);
398 XFS_STATS_INC(mp, xs_ig_frecycle);
405 * If we are racing with another cache hit that is currently
406 * instantiating this inode or currently recycling it out of
407 * reclaimable state, wait for the initialisation to complete
410 * XXX(hch): eventually we should do something equivalent to
411 * wait_on_inode to wait for these flags to be cleared
412 * instead of polling for it.
414 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
415 trace_xfs_iget_skip(ip);
416 XFS_STATS_INC(mp, xs_ig_frecycle);
422 * Check the inode free state is valid. This also detects lookup
423 * racing with unlinks.
425 error = xfs_iget_check_free_state(ip, flags);
430 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
431 * Need to carefully get it back into usable state.
433 if (ip->i_flags & XFS_IRECLAIMABLE) {
434 trace_xfs_iget_reclaim(ip);
436 if (flags & XFS_IGET_INCORE) {
442 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
443 * from stomping over us while we recycle the inode. We can't
444 * clear the radix tree reclaimable tag yet as it requires
445 * pag_ici_lock to be held exclusive.
447 ip->i_flags |= XFS_IRECLAIM;
449 spin_unlock(&ip->i_flags_lock);
452 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
453 error = xfs_reinit_inode(mp, inode);
457 * Re-initializing the inode failed, and we are in deep
458 * trouble. Try to re-add it to the reclaim list.
461 spin_lock(&ip->i_flags_lock);
462 wake = !!__xfs_iflags_test(ip, XFS_INEW);
463 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
465 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
466 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
467 trace_xfs_iget_reclaim_fail(ip);
471 spin_lock(&pag->pag_ici_lock);
472 spin_lock(&ip->i_flags_lock);
475 * Clear the per-lifetime state in the inode as we are now
476 * effectively a new inode and need to return to the initial
477 * state before reuse occurs.
479 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
480 ip->i_flags |= XFS_INEW;
481 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
482 inode->i_state = I_NEW;
486 spin_unlock(&ip->i_flags_lock);
487 spin_unlock(&pag->pag_ici_lock);
489 /* If the VFS inode is being torn down, pause and try again. */
491 trace_xfs_iget_skip(ip);
496 /* We've got a live one. */
497 spin_unlock(&ip->i_flags_lock);
499 trace_xfs_iget_hit(ip);
503 xfs_ilock(ip, lock_flags);
505 if (!(flags & XFS_IGET_INCORE))
506 xfs_iflags_clear(ip, XFS_ISTALE);
507 XFS_STATS_INC(mp, xs_ig_found);
512 spin_unlock(&ip->i_flags_lock);
520 struct xfs_mount *mp,
521 struct xfs_perag *pag,
524 struct xfs_inode **ipp,
528 struct xfs_inode *ip;
530 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
533 ip = xfs_inode_alloc(mp, ino);
537 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
542 * For version 5 superblocks, if we are initialising a new inode and we
543 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
544 * simply build the new inode core with a random generation number.
546 * For version 4 (and older) superblocks, log recovery is dependent on
547 * the i_flushiter field being initialised from the current on-disk
548 * value and hence we must also read the inode off disk even when
549 * initializing new inodes.
551 if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
552 (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
553 VFS_I(ip)->i_generation = prandom_u32();
557 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
561 error = xfs_inode_from_disk(ip,
562 xfs_buf_offset(bp, ip->i_imap.im_boffset));
564 xfs_buf_set_ref(bp, XFS_INO_REF);
565 xfs_trans_brelse(tp, bp);
571 trace_xfs_iget_miss(ip);
574 * Check the inode free state is valid. This also detects lookup
575 * racing with unlinks.
577 error = xfs_iget_check_free_state(ip, flags);
582 * Preload the radix tree so we can insert safely under the
583 * write spinlock. Note that we cannot sleep inside the preload
584 * region. Since we can be called from transaction context, don't
585 * recurse into the file system.
587 if (radix_tree_preload(GFP_NOFS)) {
593 * Because the inode hasn't been added to the radix-tree yet it can't
594 * be found by another thread, so we can do the non-sleeping lock here.
597 if (!xfs_ilock_nowait(ip, lock_flags))
602 * These values must be set before inserting the inode into the radix
603 * tree as the moment it is inserted a concurrent lookup (allowed by the
604 * RCU locking mechanism) can find it and that lookup must see that this
605 * is an inode currently under construction (i.e. that XFS_INEW is set).
606 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
607 * memory barrier that ensures this detection works correctly at lookup
611 if (flags & XFS_IGET_DONTCACHE)
612 d_mark_dontcache(VFS_I(ip));
616 xfs_iflags_set(ip, iflags);
618 /* insert the new inode */
619 spin_lock(&pag->pag_ici_lock);
620 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
621 if (unlikely(error)) {
622 WARN_ON(error != -EEXIST);
623 XFS_STATS_INC(mp, xs_ig_dup);
625 goto out_preload_end;
627 spin_unlock(&pag->pag_ici_lock);
628 radix_tree_preload_end();
634 spin_unlock(&pag->pag_ici_lock);
635 radix_tree_preload_end();
637 xfs_iunlock(ip, lock_flags);
639 __destroy_inode(VFS_I(ip));
645 * Look up an inode by number in the given file system. The inode is looked up
646 * in the cache held in each AG. If the inode is found in the cache, initialise
647 * the vfs inode if necessary.
649 * If it is not in core, read it in from the file system's device, add it to the
650 * cache and initialise the vfs inode.
652 * The inode is locked according to the value of the lock_flags parameter.
653 * Inode lookup is only done during metadata operations and not as part of the
654 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
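/*
 * Typical calling sketch (illustrative only; assumes the caller already has
 * a valid inode number, e.g. from a directory lookup, and is not inside a
 * transaction):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	// ... operate on ip ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	xfs_irele(ip);
 */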
658 struct xfs_mount *mp,
659 struct xfs_trans *tp,
663 struct xfs_inode **ipp)
665 struct xfs_inode *ip;
666 struct xfs_perag *pag;
670 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
672 /* reject inode numbers outside existing AGs */
673 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
676 XFS_STATS_INC(mp, xs_ig_attempts);
678 /* get the perag structure and ensure that it's inode capable */
679 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
680 agino = XFS_INO_TO_AGINO(mp, ino);
685 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
688 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
690 goto out_error_or_again;
693 if (flags & XFS_IGET_INCORE) {
695 goto out_error_or_again;
697 XFS_STATS_INC(mp, xs_ig_missed);
699 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
702 goto out_error_or_again;
709 * If we have a real type for an on-disk inode, we can setup the inode
710 * now. If it's a new inode being created, xfs_ialloc will handle it.
712 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
713 xfs_setup_existing_inode(ip);
717 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
726 * "Is this a cached inode that's also allocated?"
728 * Look up an inode by number in the given file system. If the inode is
729 * in cache and isn't in purgatory, return 1 if the inode is allocated
730 * and 0 if it is not. For all other cases (not in cache, being torn
731 * down, etc.), return a negative error code.
733 * The caller has to prevent inode allocation and freeing activity,
734 * presumably by locking the AGI buffer. This is to ensure that an
735 * inode cannot transition from allocated to freed until the caller is
736 * ready to allow that. If the inode is in an intermediate state (new,
737 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
738 * inode is not in the cache, -ENOENT will be returned. The caller must
739 * deal with these scenarios appropriately.
741 * This is a specialized use case for the online scrubber; if you're
742 * reading this, you probably want xfs_iget.
745 xfs_icache_inode_is_allocated(
746 struct xfs_mount *mp,
747 struct xfs_trans *tp,
751 struct xfs_inode *ip;
754 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
758 *inuse = !!(VFS_I(ip)->i_mode);
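/*
 * Sketch of the intended calling pattern (hypothetical caller; the real
 * users live in the online scrub code). The AGI buffer is held locked so
 * that the inode cannot change allocation state during the check:
 *
 *	bool	inuse;
 *	int	error;
 *
 *	// caller holds the locked AGI buffer for this AG
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (error == -ENOENT || error == -EAGAIN) {
 *		// not cached or in flux: fall back to the inode btree
 *	} else if (!error) {
 *		// inuse says whether the cached inode is allocated
 *	}
 */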
764 * The inode lookup is done in batches to keep the amount of lock traffic and
765 * radix tree lookups to a minimum. The batch size is a trade off between
766 * lookup reduction and stack usage. This is in the reclaim path, so we can't be too greedy.
769 * XXX: This will be moved closer to xfs_icwalk* once we get rid of the
770 * separate reclaim walk functions.
772 #define XFS_LOOKUP_BATCH 32
774 #ifdef CONFIG_XFS_QUOTA
775 /* Decide if we want to grab this inode to drop its dquots. */
778 struct xfs_inode *ip)
782 ASSERT(rcu_read_lock_held());
784 /* Check for stale RCU freed inode */
785 spin_lock(&ip->i_flags_lock);
790 * Skip inodes that are anywhere in the reclaim machinery because we
791 * drop dquots before tagging an inode for reclamation.
793 if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE))
797 * The inode looks alive; try to grab a VFS reference so that it won't
798 * get destroyed. If we got the reference, return true to say that
799 * we grabbed the inode.
801 * If we can't get the reference, then we know the inode had its VFS
802 * state torn down and hasn't yet entered the reclaim machinery. Since
803 * we also know that dquots are detached from an inode before it enters
804 * reclaim, we can skip the inode.
806 ret = igrab(VFS_I(ip)) != NULL;
809 spin_unlock(&ip->i_flags_lock);
813 /* Drop this inode's dquots. */
816 struct xfs_inode *ip,
819 struct xfs_eofblocks *eofb = priv;
821 if (xfs_iflags_test(ip, XFS_INEW))
824 xfs_ilock(ip, XFS_ILOCK_EXCL);
825 if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
826 xfs_qm_dqrele(ip->i_udquot);
829 if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
830 xfs_qm_dqrele(ip->i_gdquot);
833 if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
834 xfs_qm_dqrele(ip->i_pdquot);
837 xfs_iunlock(ip, XFS_ILOCK_EXCL);
842 * Detach all dquots from incore inodes if we can. The caller must already
843 * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
844 * not get reattached.
847 xfs_dqrele_all_inodes(
848 struct xfs_mount *mp,
851 struct xfs_eofblocks eofb = { .eof_flags = 0 };
853 if (qflags & XFS_UQUOTA_ACCT)
854 eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
855 if (qflags & XFS_GQUOTA_ACCT)
856 eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
857 if (qflags & XFS_PQUOTA_ACCT)
858 eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;
860 return xfs_icwalk(mp, XFS_ICWALK_DQRELE, &eofb);
863 # define xfs_dqrele_igrab(ip) (false)
864 # define xfs_dqrele_inode(ip, priv) (0)
865 #endif /* CONFIG_XFS_QUOTA */
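/*
 * Sketch of how a quota-off path is expected to use xfs_dqrele_all_inodes()
 * (simplified; assumes the caller has already cleared the corresponding
 * XFS_*QUOTA_ACTIVE flag so that inodes cannot re-attach dquots behind us):
 *
 *	mp->m_qflags &= ~XFS_UQUOTA_ACTIVE;
 *	error = xfs_dqrele_all_inodes(mp, XFS_UQUOTA_ACCT);
 *	// no incore inode holds a user dquot reference any more
 */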
868 * Grab the inode for reclaim exclusively.
870 * We have found this inode via a lookup under RCU, so the inode may have
871 * already been freed, or it may be in the process of being recycled by
872 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
873 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
874 * will not be set. Hence we need to check for both these flag conditions to
875 * avoid inodes that are no longer reclaim candidates.
877 * Note: checking for other state flags here, under the i_flags_lock or not, is
878 * racy and should be avoided. Those races should be resolved only after we have
879 * ensured that we are able to reclaim this inode and the world can see that we
880 * are going to reclaim it.
882 * Return true if we grabbed it, false otherwise.
885 xfs_reclaim_inode_grab(
886 struct xfs_inode *ip)
888 ASSERT(rcu_read_lock_held());
890 spin_lock(&ip->i_flags_lock);
891 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
892 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
893 /* not a reclaim candidate. */
894 spin_unlock(&ip->i_flags_lock);
897 __xfs_iflags_set(ip, XFS_IRECLAIM);
898 spin_unlock(&ip->i_flags_lock);
903 * Inode reclaim is non-blocking, so the default action if progress cannot be
904 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
905 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
906 * blocking anymore and hence we can wait for the inode to be able to reclaim it.
909 * We do no IO here - if callers require inodes to be cleaned they must push the
910 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
911 * done in the background in a non-blocking manner, and enables memory reclaim
912 * to make progress without blocking.
916 struct xfs_inode *ip,
917 struct xfs_perag *pag)
919 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
921 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
923 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
926 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
928 xfs_iflush_abort(ip);
931 if (xfs_ipincount(ip))
932 goto out_clear_flush;
933 if (!xfs_inode_clean(ip))
934 goto out_clear_flush;
936 xfs_iflags_clear(ip, XFS_IFLUSHING);
940 * Because we use RCU freeing we need to ensure the inode always appears
941 * to be reclaimed with an invalid inode number when in the free state.
942 * We do this as early as possible under the ILOCK so that
943 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
944 * detect races with us here. By doing this, we guarantee that once
945 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
946 * it will see either a valid inode that will serialise correctly, or it
947 * will see an invalid inode that it can skip.
949 spin_lock(&ip->i_flags_lock);
950 ip->i_flags = XFS_IRECLAIM;
952 spin_unlock(&ip->i_flags_lock);
954 xfs_iunlock(ip, XFS_ILOCK_EXCL);
956 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
958 * Remove the inode from the per-AG radix tree.
960 * Because radix_tree_delete won't complain even if the item was never
961 * added to the tree, assert that it's been there before to catch
962 * problems with the inode lifetime early on.
964 spin_lock(&pag->pag_ici_lock);
965 if (!radix_tree_delete(&pag->pag_ici_root,
966 XFS_INO_TO_AGINO(ip->i_mount, ino)))
968 xfs_perag_clear_reclaim_tag(pag);
969 spin_unlock(&pag->pag_ici_lock);
972 * Here we do an (almost) spurious inode lock in order to coordinate
973 * with inode cache radix tree lookups. This is because the lookup
974 * can reference the inodes in the cache without taking references.
976 * We make that OK here by ensuring that we wait until the inode is
977 * unlocked after the lookup before we go ahead and free it.
979 xfs_ilock(ip, XFS_ILOCK_EXCL);
980 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
981 xfs_iunlock(ip, XFS_ILOCK_EXCL);
982 ASSERT(xfs_inode_clean(ip));
984 __xfs_inode_free(ip);
988 xfs_iflags_clear(ip, XFS_IFLUSHING);
990 xfs_iunlock(ip, XFS_ILOCK_EXCL);
992 xfs_iflags_clear(ip, XFS_IRECLAIM);
996 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
997 * corrupted, we still want to try to reclaim all the inodes. If we don't,
998 * then a shutdown during a filesystem unmount reclaim walk will leak all the
999 * unreclaimed inodes.
1001 * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
1002 * so that callers that want to block until all dirty inodes are written back
1003 * and reclaimed can sanely loop.
1006 xfs_reclaim_inodes_ag(
1007 struct xfs_mount *mp,
1010 struct xfs_perag *pag;
1011 xfs_agnumber_t ag = 0;
1013 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1014 unsigned long first_index = 0;
1018 ag = pag->pag_agno + 1;
1020 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1022 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1026 nr_found = radix_tree_gang_lookup_tag(
1028 (void **)batch, first_index,
1030 XFS_ICI_RECLAIM_TAG);
1038 * Grab the inodes before we drop the lock. If we found
1039 * nothing, nr_found == 0 and the loop will be skipped.
1041 for (i = 0; i < nr_found; i++) {
1042 struct xfs_inode *ip = batch[i];
1044 if (done || !xfs_reclaim_inode_grab(ip))
1048 * Update the index for the next lookup. Catch
1049 * overflows into the next AG range which can
1050 * occur if we have inodes in the last block of
1051 * the AG and we are currently pointing to the
1054 * Because we may see inodes that are from the
1055 * wrong AG due to RCU freeing and
1056 * reallocation, only update the index if it
1057 * lies in this AG. It was a race that led us
1058 * to see this inode, so another lookup from
1059 * the same index will not find it again.
1061 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1064 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1065 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1069 /* unlock now we've grabbed the inodes. */
1072 for (i = 0; i < nr_found; i++) {
1074 xfs_reclaim_inode(batch[i], pag);
1077 *nr_to_scan -= XFS_LOOKUP_BATCH;
1079 } while (nr_found && !done && *nr_to_scan > 0);
1083 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1090 struct xfs_mount *mp)
1092 int nr_to_scan = INT_MAX;
1094 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
1095 xfs_ail_push_all_sync(mp->m_ail);
1096 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1101 * The shrinker infrastructure determines how many inodes we should scan for
1102 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1103 * push the AIL here. We also want to proactively free up memory if we can to
1104 * minimise the amount of work memory reclaim has to do so we kick the
1105 * background reclaim if it isn't already scheduled.
1108 xfs_reclaim_inodes_nr(
1109 struct xfs_mount *mp,
1112 /* kick background reclaimer and push the AIL */
1113 xfs_reclaim_work_queue(mp);
1114 xfs_ail_push_all(mp->m_ail);
1116 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1121 * Return the number of reclaimable inodes in the filesystem for
1122 * the shrinker to determine how much to reclaim.
1125 xfs_reclaim_inodes_count(
1126 struct xfs_mount *mp)
1128 struct xfs_perag *pag;
1129 xfs_agnumber_t ag = 0;
1130 int reclaimable = 0;
1132 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1133 ag = pag->pag_agno + 1;
1134 reclaimable += pag->pag_ici_reclaimable;
1142 struct xfs_inode *ip,
1143 struct xfs_eofblocks *eofb)
1145 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1146 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1149 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1150 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1153 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1154 ip->i_projid != eofb->eof_prid)
1161 * A union-based inode filtering algorithm. Process the inode if any of the
1162 * criteria match. This is for global/internal scans only.
1165 xfs_inode_match_id_union(
1166 struct xfs_inode *ip,
1167 struct xfs_eofblocks *eofb)
1169 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1170 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1173 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1174 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1177 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1178 ip->i_projid == eofb->eof_prid)
1185 * Is this inode @ip eligible for eof/cow block reclamation, given some
1186 * filtering parameters @eofb? The inode is eligible if @eofb is null or
1187 * if the predicate functions match.
1190 xfs_inode_matches_eofb(
1191 struct xfs_inode *ip,
1192 struct xfs_eofblocks *eofb)
1199 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1200 match = xfs_inode_match_id_union(ip, eofb);
1202 match = xfs_inode_match_id(ip, eofb);
1206 /* skip the inode if the file size is too small */
1207 if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
1208 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1215 * This is a fast pass over the inode cache to try to get reclaim moving on as
1216 * many inodes as possible in a short period of time. It kicks itself every few
1217 * seconds, as well as being kicked by the inode cache shrinker when memory goes low.
1222 struct work_struct *work)
1224 struct xfs_mount *mp = container_of(to_delayed_work(work),
1225 struct xfs_mount, m_reclaim_work);
1226 int nr_to_scan = INT_MAX;
1228 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1229 xfs_reclaim_work_queue(mp);
1233 xfs_inode_free_eofblocks(
1234 struct xfs_inode *ip,
1236 unsigned int *lockflags)
1238 struct xfs_eofblocks *eofb = args;
1241 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1243 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1247 * If the mapping is dirty the operation can block and wait for some
1248 * time. Unless we are waiting, skip it.
1250 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1253 if (!xfs_inode_matches_eofb(ip, eofb))
1257 * If the caller is waiting, return -EAGAIN to keep the background
1258 * scanner moving and revisit the inode in a subsequent pass.
1260 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1265 *lockflags |= XFS_IOLOCK_EXCL;
1267 if (xfs_can_free_eofblocks(ip, false))
1268 return xfs_free_eofblocks(ip);
1270 /* inode could be preallocated or append-only */
1271 trace_xfs_inode_free_eofblocks_invalid(ip);
1272 xfs_inode_clear_eofblocks_tag(ip);
1277 * Background scanning to trim preallocated space. This is queued based on the
1278 * 'speculative_prealloc_lifetime' tunable (5m by default).
1282 struct xfs_perag *pag)
1285 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
1286 queue_delayed_work(pag->pag_mount->m_gc_workqueue,
1287 &pag->pag_blockgc_work,
1288 msecs_to_jiffies(xfs_blockgc_secs * 1000));
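/*
 * Worked example of the delay above, assuming the default value of the
 * speculative_prealloc_lifetime sysctl (300 seconds) backing xfs_blockgc_secs:
 * 300 * 1000 = 300000ms, i.e. the worker fires five minutes out, matching
 * the "5m by default" note above.
 */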
1293 xfs_blockgc_set_iflag(
1294 struct xfs_inode *ip,
1295 unsigned long iflag)
1297 struct xfs_mount *mp = ip->i_mount;
1298 struct xfs_perag *pag;
1301 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1304 * Don't bother locking the AG and looking up in the radix trees
1305 * if we already know that we have the tag set.
1307 if (ip->i_flags & iflag)
1309 spin_lock(&ip->i_flags_lock);
1310 ip->i_flags |= iflag;
1311 spin_unlock(&ip->i_flags_lock);
1313 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1314 spin_lock(&pag->pag_ici_lock);
1316 tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
1317 radix_tree_tag_set(&pag->pag_ici_root,
1318 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1319 XFS_ICI_BLOCKGC_TAG);
1321 /* propagate the blockgc tag up into the perag radix tree */
1322 spin_lock(&ip->i_mount->m_perag_lock);
1323 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1324 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1325 XFS_ICI_BLOCKGC_TAG);
1326 spin_unlock(&ip->i_mount->m_perag_lock);
1328 /* kick off background trimming */
1329 xfs_blockgc_queue(pag);
1331 trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
1335 spin_unlock(&pag->pag_ici_lock);
1340 xfs_inode_set_eofblocks_tag(
1343 trace_xfs_inode_set_eofblocks_tag(ip);
1344 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1348 xfs_blockgc_clear_iflag(
1349 struct xfs_inode *ip,
1350 unsigned long iflag)
1352 struct xfs_mount *mp = ip->i_mount;
1353 struct xfs_perag *pag;
1356 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1358 spin_lock(&ip->i_flags_lock);
1359 ip->i_flags &= ~iflag;
1360 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1361 spin_unlock(&ip->i_flags_lock);
1366 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1367 spin_lock(&pag->pag_ici_lock);
1369 radix_tree_tag_clear(&pag->pag_ici_root,
1370 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1371 XFS_ICI_BLOCKGC_TAG);
1372 if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
1373 /* clear the blockgc tag from the perag radix tree */
1374 spin_lock(&ip->i_mount->m_perag_lock);
1375 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1376 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1377 XFS_ICI_BLOCKGC_TAG);
1378 spin_unlock(&ip->i_mount->m_perag_lock);
1379 trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
1383 spin_unlock(&pag->pag_ici_lock);
1388 xfs_inode_clear_eofblocks_tag(
1391 trace_xfs_inode_clear_eofblocks_tag(ip);
1392 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1396 * Set ourselves up to free CoW blocks from this file. If it's already clean
1397 * then we can bail out quickly, but otherwise we must back off if the file
1398 * is undergoing some kind of write.
1401 xfs_prep_free_cowblocks(
1402 struct xfs_inode *ip)
1405 * Just clear the tag if we have an empty cow fork or none at all. It's
1406 * possible the inode was fully unshared since it was originally tagged.
1408 if (!xfs_inode_has_cow_data(ip)) {
1409 trace_xfs_inode_free_cowblocks_invalid(ip);
1410 xfs_inode_clear_cowblocks_tag(ip);
1415 * If the mapping is dirty or under writeback we cannot touch the
1416 * CoW fork. Leave it alone if we're in the midst of a directio.
1418 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1419 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1420 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1421 atomic_read(&VFS_I(ip)->i_dio_count))
1428 * Automatic CoW Reservation Freeing
1430 * These functions automatically garbage collect leftover CoW reservations
1431 * that were made on behalf of a cowextsize hint when we start to run out
1432 * of quota or when the reservations sit around for too long. If the file
1433 * has dirty pages or is undergoing writeback, its CoW reservations will be retained.
1436 * The actual garbage collection piggybacks off the same code that runs
1437 * the speculative EOF preallocation garbage collector.
1440 xfs_inode_free_cowblocks(
1441 struct xfs_inode *ip,
1443 unsigned int *lockflags)
1445 struct xfs_eofblocks *eofb = args;
1449 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1451 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1454 if (!xfs_prep_free_cowblocks(ip))
1457 if (!xfs_inode_matches_eofb(ip, eofb))
1461 * If the caller is waiting, return -EAGAIN to keep the background
1462 * scanner moving and revisit the inode in a subsequent pass.
1464 if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1465 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1470 *lockflags |= XFS_IOLOCK_EXCL;
1472 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1477 *lockflags |= XFS_MMAPLOCK_EXCL;
1480 * Check again, nobody else should be able to dirty blocks or change
1481 * the reflink iflag now that we have the first two locks held.
1483 if (xfs_prep_free_cowblocks(ip))
1484 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1489 xfs_inode_set_cowblocks_tag(
1492 trace_xfs_inode_set_cowblocks_tag(ip);
1493 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1497 xfs_inode_clear_cowblocks_tag(
1500 trace_xfs_inode_clear_cowblocks_tag(ip);
1501 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1504 #define for_each_perag_tag(mp, next_agno, pag, tag) \
1505 for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
1507 (next_agno) = (pag)->pag_agno + 1, \
1508 xfs_perag_put(pag), \
1509 (pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
1512 /* Disable post-EOF and CoW block auto-reclamation. */
1515 struct xfs_mount *mp)
1517 struct xfs_perag *pag;
1518 xfs_agnumber_t agno;
1520 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1521 cancel_delayed_work_sync(&pag->pag_blockgc_work);
1524 /* Enable post-EOF and CoW block auto-reclamation. */
1527 struct xfs_mount *mp)
1529 struct xfs_perag *pag;
1530 xfs_agnumber_t agno;
1532 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1533 xfs_blockgc_queue(pag);
1536 /* Don't try to run block gc on an inode that's in any of these states. */
1537 #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
1538 XFS_IRECLAIMABLE | \
1541 * Decide if the given @ip is eligible for garbage collection of speculative
1542 * preallocations, and grab it if so. Returns true if it's ready to go or
1543 * false if we should just ignore it.
1547 struct xfs_inode *ip)
1549 struct inode *inode = VFS_I(ip);
1551 ASSERT(rcu_read_lock_held());
1553 /* Check for stale RCU freed inode */
1554 spin_lock(&ip->i_flags_lock);
1556 goto out_unlock_noent;
1558 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1559 goto out_unlock_noent;
1560 spin_unlock(&ip->i_flags_lock);
1562 /* nothing to sync during shutdown */
1563 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
1566 /* If we can't grab the inode, it must be on its way to reclaim. */
1570 /* inode is valid */
1574 spin_unlock(&ip->i_flags_lock);
1578 /* Scan one incore inode for block preallocations that we can remove. */
1580 xfs_blockgc_scan_inode(
1581 struct xfs_inode *ip,
1584 unsigned int lockflags = 0;
1587 error = xfs_inode_free_eofblocks(ip, args, &lockflags);
1591 error = xfs_inode_free_cowblocks(ip, args, &lockflags);
1594 xfs_iunlock(ip, lockflags);
1598 /* Background worker that trims preallocated space. */
1601 struct work_struct *work)
1603 struct xfs_perag *pag = container_of(to_delayed_work(work),
1604 struct xfs_perag, pag_blockgc_work);
1605 struct xfs_mount *mp = pag->pag_mount;
1608 if (!sb_start_write_trylock(mp->m_super))
1610 error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1612 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1613 pag->pag_agno, error);
1614 sb_end_write(mp->m_super);
1615 xfs_blockgc_queue(pag);
1619 * Try to free space in the filesystem by purging eofblocks and cowblocks.
1622 xfs_blockgc_free_space(
1623 struct xfs_mount *mp,
1624 struct xfs_eofblocks *eofb)
1626 trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
1628 return xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, eofb);
1632 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1633 * quota caused an allocation failure, so we make a best effort by including
1634 * each quota under low free space conditions (less than 1% free space) in the scan.
1637 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
1638 * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
1642 xfs_blockgc_free_dquots(
1643 struct xfs_mount *mp,
1644 struct xfs_dquot *udqp,
1645 struct xfs_dquot *gdqp,
1646 struct xfs_dquot *pdqp,
1647 unsigned int eof_flags)
1649 struct xfs_eofblocks eofb = {0};
1650 bool do_work = false;
1652 if (!udqp && !gdqp && !pdqp)
1656 * Run a scan to free blocks using the union filter to cover all
1657 * applicable quotas in a single scan.
1659 eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;
1661 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1662 eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1663 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1667 if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1668 eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1669 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1673 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1674 eofb.eof_prid = pdqp->q_id;
1675 eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
1682 return xfs_blockgc_free_space(mp, &eofb);
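/*
 * Sketch of a transaction-allocation caller using the helper above
 * (simplified and hypothetical; assumes a quota reservation just failed and
 * that no inode ILOCKs are held, per the locking rules documented above):
 *
 *	error = reserve_quota(...);
 *	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
 *		retried = true;
 *		goto retry;
 *	}
 */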
1685 /* Run cow/eofblocks scans on the quotas attached to the inode. */
1687 xfs_blockgc_free_quota(
1688 struct xfs_inode *ip,
1689 unsigned int eof_flags)
1691 return xfs_blockgc_free_dquots(ip->i_mount,
1692 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1693 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1694 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
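/*
 * Sketch of the per-inode retry pattern this helper enables (simplified
 * from the buffered write ENOSPC/EDQUOT path; the surrounding names are
 * assumptions). The IOLOCK is dropped first because a synchronous scan
 * must not be run with it held, as noted above:
 *
 *	if ((ret == -EDQUOT || ret == -ENOSPC) && !cleared_space) {
 *		xfs_iunlock(ip, iolock);
 *		xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
 *		cleared_space = true;
 *		goto retry;
 *	}
 */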
1697 /* XFS Inode Cache Walking Code */
1700 * Decide if we want to grab this inode in anticipation of doing work towards
1701 * the goal. If selected, we grab a VFS reference to this inode, which
1702 * will be released after processing.
1706 enum xfs_icwalk_goal goal,
1707 struct xfs_inode *ip)
1710 case XFS_ICWALK_DQRELE:
1711 return xfs_dqrele_igrab(ip);
1712 case XFS_ICWALK_BLOCKGC:
1713 return xfs_blockgc_igrab(ip);
1719 /* Process an inode and release it. Return -EAGAIN to skip an inode. */
1721 xfs_icwalk_process_inode(
1722 enum xfs_icwalk_goal goal,
1723 struct xfs_inode *ip,
1729 case XFS_ICWALK_DQRELE:
1730 error = xfs_dqrele_inode(ip, args);
1732 case XFS_ICWALK_BLOCKGC:
1733 error = xfs_blockgc_scan_inode(ip, args);
1741 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1742 * process them in some manner.
1746 struct xfs_perag *pag,
1747 enum xfs_icwalk_goal goal,
1750 struct xfs_mount *mp = pag->pag_mount;
1751 uint32_t first_index;
1763 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1764 unsigned int tag = xfs_icwalk_tag(goal);
1770 if (tag == XFS_ICWALK_NULL_TAG)
1771 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
1772 (void **)batch, first_index,
1775 nr_found = radix_tree_gang_lookup_tag(
1777 (void **) batch, first_index,
1778 XFS_LOOKUP_BATCH, tag);
1786 * Grab the inodes before we drop the lock. If we found
1787 * nothing, nr_found == 0 and the loop will be skipped.
1789 for (i = 0; i < nr_found; i++) {
1790 struct xfs_inode *ip = batch[i];
1792 if (done || !xfs_icwalk_igrab(goal, ip))
1796 * Update the index for the next lookup. Catch
1797 * overflows into the next AG range which can occur if
1798 * we have inodes in the last block of the AG and we
1799 * are currently pointing to the last inode.
1801 * Because we may see inodes that are from the wrong AG
1802 * due to RCU freeing and reallocation, only update the
1803 * index if it lies in this AG. It was a race that led
1804 * us to see this inode, so another lookup from the
1805 * same index will not find it again.
1807 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1809 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1810 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1814 /* unlock now we've grabbed the inodes. */
1817 for (i = 0; i < nr_found; i++) {
1820 error = xfs_icwalk_process_inode(goal, batch[i], args);
1821 if (error == -EAGAIN) {
1825 if (error && last_error != -EFSCORRUPTED)
1829 /* bail out if the filesystem is corrupted. */
1830 if (error == -EFSCORRUPTED)
1835 } while (nr_found && !done);
1844 /* Fetch the next (possibly tagged) per-AG structure. */
1845 static inline struct xfs_perag *
1846 xfs_icwalk_get_perag(
1847 struct xfs_mount *mp,
1848 xfs_agnumber_t agno,
1849 enum xfs_icwalk_goal goal)
1851 unsigned int tag = xfs_icwalk_tag(goal);
1853 if (tag == XFS_ICWALK_NULL_TAG)
1854 return xfs_perag_get(mp, agno);
1855 return xfs_perag_get_tag(mp, agno, tag);
1858 /* Walk all incore inodes to achieve a given goal. */
1861 struct xfs_mount *mp,
1862 enum xfs_icwalk_goal goal,
1865 struct xfs_perag *pag;
1868 xfs_agnumber_t agno = 0;
1870 while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
1871 agno = pag->pag_agno + 1;
1872 error = xfs_icwalk_ag(pag, goal, args);
1876 if (error == -EFSCORRUPTED)
1881 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);