// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

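/*
 * Illustrative sketch (annotation, not part of the original file): a reclaim
 * walk that also reclaims sick inodes and stops after 32 inodes would combine
 * the private flags like so; icw_scan_limit is only consulted when
 * XFS_ICWALK_FLAG_SCAN_LIMIT is set:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT |
 *				  XFS_ICWALK_FLAG_RECLAIM_SICK,
 *		.icw_scan_limit	= 32,
 *	};
 *	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 */
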
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = NULLAGINO;

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state.  The ip->i_flags_lock provides the barrier against
	 * lookup races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

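/*
 * Worked example (annotation, not in the original source): with the default
 * xfs_syncd_centisecs of 3000 (30 seconds), the delay above evaluates to
 * msecs_to_jiffies(3000 / 6 * 10) == msecs_to_jiffies(5000), so background
 * reclaim reschedules itself roughly every 5 seconds.
 */
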
/*
 * Background scanning to trim preallocated space.  This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

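/*
 * Annotation (not in the original source): xfs_blockgc_secs comes from the
 * speculative_prealloc_lifetime sysctl, which defaults to 300 seconds, so the
 * delay above is msecs_to_jiffies(300 * 1000) -- the "5m by default" noted in
 * the comment before this function.
 */
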
/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure.  This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally.  Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode.  If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static bool
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			ret = false;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			ret = true;
		}
	}

	return ret;
}

/* Wait for all queued work and collect errors */
static int
xfs_inodegc_wait_all(
	struct xfs_mount	*mp)
{
	int			cpu;
	int			error = 0;

	flush_workqueue(mp->m_inodegc_wq);
	for_each_online_cpu(cpu) {
		struct xfs_inodegc	*gc;

		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (gc->error && !error)
			error = gc->error;
		gc->error = 0;
	}

	return error;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet.  We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid.  This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error == -EAGAIN)
			goto out_skip;
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock.  We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = get_random_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid.  This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock.  Note that we cannot sleep inside the preload
	 * region.  Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path.  Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
	    error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

806 * "Is this a cached inode that's also allocated?"
808 * Look up an inode by number in the given file system. If the inode is
809 * in cache and isn't in purgatory, return 1 if the inode is allocated
810 * and 0 if it is not. For all other cases (not in cache, being torn
811 * down, etc.), return a negative error code.
813 * The caller has to prevent inode allocation and freeing activity,
814 * presumably by locking the AGI buffer. This is to ensure that an
815 * inode cannot transition from allocated to freed until the caller is
816 * ready to allow that. If the inode is in an intermediate state (new,
817 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
818 * inode is not in the cache, -ENOENT will be returned. The caller must
819 * deal with these scenarios appropriately.
821 * This is a specialized use case for the online scrubber; if you're
822 * reading this, you probably want xfs_iget.
825 xfs_icache_inode_is_allocated(
826 struct xfs_mount *mp,
827 struct xfs_trans *tp,
831 struct xfs_inode *ip;
834 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
838 *inuse = !!(VFS_I(ip)->i_mode);
/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget().  In both cases, the inode will have XFS_IRECLAIM set.  If the
 * inode has been fully recycled by the time we get the i_flags_lock,
 * XFS_IRECLAIMABLE will not be set.  Hence we need to check for both these
 * flag conditions to avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided.  Those races should be resolved only after we
 * have ensured that we are able to reclaim this inode and the world can see
 * that we are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

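/*
 * Annotation (not in the original source): the double flag test above guards
 * two distinct races described in the comment before this function.  A
 * concurrent xfs_iget() recycle sets XFS_IRECLAIM while it reinitialises the
 * inode, and a fully recycled inode has had XFS_IRECLAIMABLE cleared again;
 * in both cases the inode must be skipped.  Hence a candidate must have
 * IRECLAIMABLE set and IRECLAIM clear, tested under the same i_flags_lock
 * hold that then sets IRECLAIM for us.
 */
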
/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state.  This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here.  By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim.  We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here.  We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm.  Process the inode if any of the
 * criteria match.  This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

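/*
 * Illustrative sketch (annotation, not part of the original file): with the
 * union filter, an inode owned by either the uid *or* the gid below is
 * processed; without XFS_ICWALK_FLAG_UNION both predicates would have to
 * match:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_UID | XFS_ICWALK_FLAG_GID |
 *				  XFS_ICWALK_FLAG_UNION,
 *		.icw_uid	= make_kuid(&init_user_ns, 1000),
 *		.icw_gid	= make_kgid(&init_user_ns, 100),
 *	};
 */
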
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time.  It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time.  Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all.  It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	return xfs_inodegc_flush(mp);
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
int
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now.  If it
	 * wasn't queued, it will not be requeued.  Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	return xfs_inodegc_flush(mp);
}

/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}

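/*
 * Usage sketch (annotation, not in the original source): callers typically
 * run these scans after a reservation fails with -EDQUOT or -ENOSPC, then
 * retry the operation once, along the lines of:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
 *	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *		xfs_trans_cancel(tp);
 *		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *		xfs_blockgc_free_quota(ip, 0);
 *		retried = true;
 *		goto retry;
 *	}
 */
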
/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum.  The batch size is a trade off between
 * lookup reduction and stack usage.  This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

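/*
 * Annotation (not in the original source): the batch is an on-stack array of
 * inode pointers in xfs_icwalk_ag(), so 32 entries costs 32 * 8 = 256 bytes
 * on a 64-bit machine per walk invocation -- the stack-versus-lookup trade
 * off the comment above refers to.
 */
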
/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode.  Each processing function must handle any state changes
 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **) batch, first_index,
				XFS_LOOKUP_BATCH, goal);
		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock.  if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup.  Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG.  It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, goal) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_rele(pag);
				break;
			}
		}
	}
	return last_error;
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static int
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	int			error;

	trace_xfs_inode_inactivating(ip);
	error = xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
	return error;
}

void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;
	unsigned int		nofs_flag;

	ASSERT(gc->cpu == smp_processor_id());

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		int	error;

		xfs_iflags_set(ip, XFS_INACTIVATING);
		error = xfs_inodegc_inactivate(ip);
		if (error && !gc->error)
			gc->error = error;
	}

	memalloc_nofs_restore(nofs_flag);
}

/*
 * Expedite all pending inodegc work to run immediately.  This does not wait for
 * completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
int
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	return xfs_inodegc_wait_all(mp);
}

/*
 * Flush all the pending work and then disable the inode inactivation background
 * workers and wait for them to stop.  Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	bool			rerun;

	if (!xfs_clear_inodegc_enabled(mp))
		return;

	/*
	 * Drain all pending inodegc work, including inodes that could be
	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
	 * threads that sample the inodegc state just prior to us clearing it.
	 * The inodegc flag state prevents new threads from queuing more
	 * inodes, so we queue pending work items and flush the workqueue until
	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
	 * here because it does not allow other unserialized mechanisms to
	 * reschedule inodegc work while this draining is in progress.
	 */
	xfs_inodegc_queue_all(mp);
	do {
		flush_workqueue(mp->m_inodegc_wq);
		rerun = xfs_inodegc_queue_all(mp);
	} while (rerun);

	trace_xfs_inodegc_stop(mp, __return_address);
}

/*
 * Enable the inode inactivation background workers and schedule deferred inode
 * inactivation work if there is any.  Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	if (__percpu_counter_compare(&mp->m_frextents,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	return false;
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */

/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}

/*
 * Upper bound on the number of inodes in each AG that can be queued for
 * inactivation at any given time, to avoid monopolizing the workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG	(4 * XFS_INODES_PER_CHUNK)

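/*
 * Worked example (annotation, not in the original source): XFS_INODES_PER_CHUNK
 * is 64, so this caps the backlog at 4 * 64 = 256 queued inodes before the
 * frontend is made to wait in xfs_inodegc_want_flush_work() below.
 */
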
/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If the current thread is running a transaction, we don't ever want to
 * wait for other transactions because that could introduce a deadlock.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->journal_info)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}

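/*
 * Annotation (not in the original source): current->journal_info is non-NULL
 * while this task holds a transaction reservation, so the first check above
 * keeps a transaction-holding thread from blocking on inactivation work that
 * may itself need transactions -- the deadlock called out in the comment
 * before this function.
 */
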
/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu_ptr(gc);
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
			queue_delay);
	put_cpu_ptr(gc);

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}

/*
 * Fold the dead CPU inodegc queue into the current CPUs queue.
 */
void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*dead_gc, *gc;
	struct llist_node	*first, *last;
	unsigned int		count = 0;

	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_delayed_work_sync(&dead_gc->work);

	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
		count++;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;

	/* Add pending work to current CPU */
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);

	if (xfs_is_inodegc_enabled(mp)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
				0);
	}
	put_cpu_ptr(gc);
}

/*
 * We set the inode flag atomically with the radix tree tag.  Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean, it
 * still may be under IO and hence we have to wait for IO completion to occur
 * before we can reclaim the inode.  The background reclaim path handles this
 * more efficiently than we can here, so simply let background reclaim tear down
 * all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}

/*
 * Register a phony shrinker so that we can run background inodegc sooner when
 * there's memory pressure.  Inactivation does not itself free any memory but
 * it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions won't
 * make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)

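/*
 * Worked example (annotation, not in the original source): DEF_PRIORITY is
 * 12, so the count callback reports 1UL << 12 == 4096 objects and the batch
 * is 2049.  With ->seeks set to zero the shrinker core proposes roughly
 * count / 2 == 2048 objects per pass, which stays below the batch size on
 * the first pass; only once the deferred remainder of a second pass is added
 * does the total cross the batch, which is how the scan callback ends up
 * running "during the second round of scanning" as described above.
 */
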
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}

/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
}