// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/iversion.h>
/*
 * Allocate and initialise an xfs_inode.
 */
	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, 0);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}
xfs_inode_free_callback(
	struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode *ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	ASSERT(!test_bit(XFS_LI_IN_AIL,
			 &ip->i_itemp->ili_item.li_flags));
	xfs_inode_item_destroy(ip);

	kmem_cache_free(xfs_inode_zone, ip);
}
	struct xfs_inode *ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
	struct xfs_inode *ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
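/*
 * The lookup side of the above race avoidance lives in xfs_iget_cache_hit():
 * it re-checks ip->i_ino under ip->i_flags_lock and skips the inode when the
 * number no longer matches the one it looked up.
 */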
/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
xfs_reclaim_work_queue(
	struct xfs_mount *mp)
{
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
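		/*
		 * With the default xfs_syncd_centisecs of 3000 (the 30s
		 * periodic sync interval mentioned above), the delay computed
		 * here is 3000 / 6 * 10 = 5000ms, i.e. the 5 second cadence.
		 */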
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}
xfs_perag_set_reclaim_tag(
	struct xfs_perag *pag)
{
	struct xfs_mount *mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
xfs_perag_clear_reclaim_tag(
	struct xfs_perag *pag)
{
	struct xfs_mount *mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
xfs_inode_set_reclaim_tag(
	struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
xfs_inode_clear_reclaim_tag(
	struct xfs_perag *pag,
	xfs_ino_t ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}
	struct xfs_inode *ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}
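/*
 * Waiters blocked here are woken by wake_up_bit(&ip->i_flags, __XFS_INEW_BIT)
 * once XFS_INEW has been cleared - see, for example, the recycle failure path
 * in xfs_iget_cache_hit() below.
 */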
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
	struct xfs_mount *mp,
	struct inode *inode)
{
	int error;
	uint32_t nlink = inode->i_nlink;
	uint32_t generation = inode->i_generation;
	uint64_t version = inode_peek_iversion(inode);
	umode_t mode = inode->i_mode;
	dev_t dev = inode->i_rdev;
	kuid_t uid = inode->i_uid;
	kgid_t gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}
/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
xfs_iget_check_free_state(
	struct xfs_inode *ip,
	int flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}
/*
 * Check the validity of the inode we just found in the cache
 */
xfs_iget_cache_hit(
	struct xfs_perag *pag,
	struct xfs_inode *ip,
	xfs_ino_t ino,
	int flags,
	int lock_flags) __releases(RCU)
{
	struct inode *inode = VFS_I(ip);
	struct xfs_mount *mp = ip->i_mount;
	int error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}
	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;
	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble. Try to re-add it to the reclaim list.
			 */
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}
		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		init_rwsem(&inode->i_rwsem);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	/* If the VFS inode is being torn down, pause and try again. */
	if (!igrab(inode)) {
		trace_xfs_iget_skip(ip);
		error = -EAGAIN;
		goto out_error;
	}

	/* We've got a live one. */
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	trace_xfs_iget_hit(ip);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
	struct xfs_mount *mp,
	struct xfs_perag *pag,
	struct xfs_inode **ipp,
	struct xfs_inode *ip;
	xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	if (!xfs_inode_verify_forks(ip)) {
		error = -EFSCORRUPTED;
		goto out_destroy;
	}

	trace_xfs_iget_miss(ip);
	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (!xfs_ilock_nowait(ip, lock_flags))
		BUG();

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	xfs_iflags_set(ip, iflags);
	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system. It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one. This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired. This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode. See the comment
 *               for xfs_ilock() for a list of valid values.
 */
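/*
 * Illustrative call pattern (hypothetical caller, shown only to make the
 * parameter ordering above concrete):
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	...use the locked inode...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	xfs_irele(ip);
 */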
	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here. It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		if (flags & XFS_IGET_INCORE) {
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
					    flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);

	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
695 * "Is this a cached inode that's also allocated?"
697 * Look up an inode by number in the given file system. If the inode is
698 * in cache and isn't in purgatory, return 1 if the inode is allocated
699 * and 0 if it is not. For all other cases (not in cache, being torn
700 * down, etc.), return a negative error code.
702 * The caller has to prevent inode allocation and freeing activity,
703 * presumably by locking the AGI buffer. This is to ensure that an
704 * inode cannot transition from allocated to freed until the caller is
705 * ready to allow that. If the inode is in an intermediate state (new,
706 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
707 * inode is not in the cache, -ENOENT will be returned. The caller must
708 * deal with these scenarios appropriately.
710 * This is a specialized use case for the online scrubber; if you're
711 * reading this, you probably want xfs_iget.
714 xfs_icache_inode_is_allocated(
715 struct xfs_mount *mp,
716 struct xfs_trans *tp,
720 struct xfs_inode *ip;
723 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
727 *inuse = !!(VFS_I(ip)->i_mode);
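/*
 * Note: a free on-disk XFS inode has di_mode == 0, so a zero VFS i_mode on the
 * cached inode is what distinguishes "cached but freed" from "cached and
 * allocated" above.
 */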
/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * recurse into the filesystem.
 */
#define XFS_LOOKUP_BATCH	32
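/* 32 inode pointers is 256 bytes of on-stack batch array on a 64-bit kernel. */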
xfs_inode_ag_walk_grab(
	struct xfs_inode *ip,
	int flags)
{
	struct inode *inode = VFS_I(ip);
	bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway. If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}
	struct xfs_mount *mp,
	struct xfs_perag *pag,
	int (*execute)(struct xfs_inode *ip, int flags,
	uint32_t first_index;
	struct xfs_inode *batch[XFS_LOOKUP_BATCH];

	do {
		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

	} while (nr_found && !done);
/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
	struct xfs_mount *mp)
{
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
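	/*
	 * xfs_eofb_secs is the 'speculative_prealloc_lifetime' sysctl noted
	 * above; its default of 300 seconds gives the 5 minute scan interval.
	 */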
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);

	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_eofblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_eofblocks(mp);
}
/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
	struct xfs_mount *mp)
{
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
}
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);

	if (!sb_start_write_trylock(mp->m_super))
		return;
	xfs_icache_free_cowblocks(mp, NULL);
	sb_end_write(mp->m_super);

	xfs_queue_cowblocks(mp);
}
xfs_inode_ag_iterator_flags(
	struct xfs_mount *mp,
	int (*execute)(struct xfs_inode *ip, int flags,
	struct xfs_perag *pag;

	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}

xfs_inode_ag_iterator(
	struct xfs_mount *mp,
	int (*execute)(struct xfs_inode *ip, int flags,

	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}
xfs_inode_ag_iterator_tag(
	struct xfs_mount *mp,
	int (*execute)(struct xfs_inode *ip, int flags,
	struct xfs_perag *pag;

	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
xfs_reclaim_inode_grab(
	struct xfs_inode *ip,
	int flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode. Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check the
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting. For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
	struct xfs_inode *ip,
	struct xfs_perag *pag,
	int sync_mode)
{
	struct xfs_buf *bp = NULL;
	xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */

restart:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}
	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here. As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again. Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}
reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
xfs_reclaim_inodes_ag(
	struct xfs_mount *mp,
	struct xfs_perag *pag;
	int trylock = flags & SYNC_TRYLOCK;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long first_index = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);
		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr_found == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = true;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

		} while (nr_found && !done && *nr_to_scan > 0);
		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {

	int nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
xfs_reclaim_inodes_nr(
	struct xfs_mount *mp,
	int nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
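/*
 * In kernels of this vintage the main caller of xfs_reclaim_inodes_nr() is the
 * superblock shrinker (xfs_fs_free_cached_objects() in xfs_super.c), which
 * passes in the shrinker's scan count as nr_to_scan.
 */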
/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
xfs_reclaim_inodes_count(
	struct xfs_mount *mp)
{
	struct xfs_perag *pag;
	xfs_agnumber_t ag = 0;
	int reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid != eofb->eof_prid)
		return 0;

	return 1;
}
/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
xfs_inode_match_id_union(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_d.di_projid == eofb->eof_prid)
		return 1;

	return 0;
}
xfs_inode_free_eofblocks(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb = args;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return 0;

	/* skip the inode if the file size is too small */
	if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}
__xfs_icache_free_eofblocks(
	struct xfs_mount *mp,
	struct xfs_eofblocks *eofb,
	int (*execute)(struct xfs_inode *ip, int flags,
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

xfs_icache_free_eofblocks(
	struct xfs_mount *mp,
	struct xfs_eofblocks *eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}
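/*
 * Hypothetical usage sketch (not taken from this file): a caller that wants a
 * synchronous scan limited to one user's inodes could build the filter like
 * this before calling xfs_icache_free_eofblocks():
 *
 *	struct xfs_eofblocks eofb = {
 *		.eof_flags	= XFS_EOF_FLAGS_SYNC | XFS_EOF_FLAGS_UID,
 *		.eof_uid	= current_fsuid(),
 *	};
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 */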
/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip,
	int (*execute)(struct xfs_mount *mp,
		       struct xfs_eofblocks *eofb))
{
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
		}
	}

	execute(ip->i_mount, &eofb);
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static inline unsigned long
xfs_iflag_for_tag(
	int tag)
{
	switch (tag) {
	case XFS_ICI_EOFBLOCKS_TAG:
		return XFS_IEOFBLOCKS;
	case XFS_ICI_COWBLOCKS_TAG:
		return XFS_ICOWBLOCKS;
	}
}
__xfs_inode_set_blocks_tag(
	void (*execute)(struct xfs_mount *mp),
	void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
		       int error, unsigned long caller_ip),
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
xfs_inode_set_eofblocks_tag(
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
__xfs_inode_clear_blocks_tag(
	void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
			 int error, unsigned long caller_ip),
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
xfs_inode_clear_eofblocks_tag(
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
/*
 * Set ourselves up to free CoW blocks from this file. If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
xfs_prep_free_cowblocks(
	struct xfs_inode *ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}
/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
xfs_inode_free_cowblocks(
	struct xfs_inode *ip,
	struct xfs_eofblocks *eofb = args;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return 0;

	/* skip the inode if the file size is too small */
	if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return 0;

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}
xfs_icache_free_cowblocks(
	struct xfs_mount *mp,
	struct xfs_eofblocks *eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

xfs_inode_set_cowblocks_tag(
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
					  trace_xfs_perag_set_cowblocks,
					  XFS_ICI_COWBLOCKS_TAG);
}

xfs_inode_clear_cowblocks_tag(
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_stop_block_reaping(
	struct xfs_mount *mp)
{
	cancel_delayed_work_sync(&mp->m_eofblocks_work);
	cancel_delayed_work_sync(&mp->m_cowblocks_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_start_block_reaping(
	struct xfs_mount *mp)
{
	xfs_queue_eofblocks(mp);
	xfs_queue_cowblocks(mp);
}