// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
/*
 * Helper function to extract the extent size hint from the inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * get it done.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
/*
 * Helper function to extract the CoW extent size hint from the inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
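
/*
 * Worked example (sketch; the values below are hypothetical, not taken
 * from this file): with no XFS_DIFLAG2_COWEXTSIZE hint set and a data
 * fork EXTSIZE hint of 16 blocks, the max of the two is 16, so CoW
 * allocations are aligned to 16 blocks.  With both hints zero, the
 * function falls back to XFS_DEFAULT_COWEXTSZ_HINT:
 *
 *	a = 0;				// no CoW hint on this inode
 *	b = xfs_get_extsz_hint(ip);	// 16
 *	return max(a, b);		// 16
 */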
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp &&
	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
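
/*
 * Illustrative sketch (not code from this file): a truncate-style caller
 * that must invalidate the page cache takes both IO-path locks in the
 * documented order before taking the ilock for the metadata update:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);		// i_rwsem first
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	// i_mmap_lock second
 *	truncate_setsize(VFS_I(ip), newsize);	// page cache work is now safe
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);		// ilock last
 *	... modify and log the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
 */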
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
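
/*
 * Usage sketch (hypothetical caller): try the non-blocking variant on a
 * hot path and fall back to the blocking xfs_ilock() when contended:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		// drop resources that could deadlock, then block
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 */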
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
	 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, and XFS_ILOCK_EXCL are valid
	 * values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif
/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
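
/*
 * Worked example (sketch): for the third inode (subclass 2) of an
 * ILOCK_EXCL batch, the returned value is the original lock mode with
 * the subclass encoded into the lockdep subclass field:
 *
 *	xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 *		== XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * xfs_ilock() later extracts it again via XFS_ILOCK_DEP(lock_flags) so
 * lockdep sees each inode in the batch as a distinct lock class.
 */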
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * ASSERT as well.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
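
/*
 * Usage sketch (hypothetical caller, e.g. a rename-like operation):
 * sort the inodes by i_ino first, then lock them all in one call so
 * each acquisition gets a distinct lockdep subclass:
 *
 *	struct xfs_inode *ips[3] = { dp1, dp2, ip };	// sorted by i_ino
 *
 *	xfs_lock_inodes(ips, 3, XFS_ILOCK_EXCL);
 */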
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
STATIC uint
_xfs_dic2xflags(
	uint64_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
		if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
	}
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_d.di_flags |= di_flags;
}
/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
	}
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
		ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
}
/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_d.di_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) &&
	    (mp->m_flags & XFS_MOUNT_GRPID)) {
		inode->i_uid = fsuid_into_mnt(mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = mp->m_ino_geo.new_diflags2;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
/*
 * Allocates a new inode from disk and returns a pointer to the incore copy.
 * This routine will internally commit the current transaction and allocate a
 * new one if we need to allocate more on-disk free inodes to perform the
 * requested operation.
 *
 * If we are allocating quota inodes, we do not have a parent inode to attach to
 * or associate with (i.e. dp == NULL) because they are not linked into the
 * directory structure - they are attached directly to the superblock - and so
 * have no parent.
 */
int
xfs_dir_ialloc(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	struct xfs_inode	**ipp)
{
	struct xfs_buf		*agibp;
	xfs_ino_t		parent_ino = dp ? dp->i_ino : 0;
	xfs_ino_t		ino;
	int			error;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * Call the space management code to pick the on-disk inode to be
	 * allocated.
	 */
	error = xfs_dialloc_select_ag(tpp, parent_ino, mode, &agibp);
	if (error)
		return error;

	if (!agibp)
		return -ENOSPC;

	/* Allocate an inode from the selected AG */
	error = xfs_dialloc_ag(*tpp, agibp, parent_ino, &ino);
	if (error)
		return error;
	ASSERT(ino != NULLFSINO);

	return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
				  prid, ipp);
}
/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
int
xfs_create(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	struct xfs_inode	**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory will also have the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
			       prid, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}
int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto error_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}
/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
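
/*
 * Caller pattern sketch (assumptions: a permanent-reservation truncate
 * transaction, mirroring xfs_inactive_truncate() below): the inode is
 * joined and held, and the possibly-replaced transaction comes back
 * through the transaction pointer for the caller to commit:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	if (!error)
 *		error = xfs_trans_commit(tp);
 */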
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return -EFSCORRUPTED;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * Check if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;
		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_lock
		 * otherwise. We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 */
		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
			error = xfs_free_eofblocks(ip);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			if (error)
				return error;
		}

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}
/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}
/*
 * xfs_inactive
 *
 * This is called when the vnode reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip))
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		return;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip);
	if (error)
		return;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	ASSERT(!ip->i_afp);
	ASSERT(ip->i_d.di_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}
/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * What if we modelled the unlinked list as a collection of records capturing
 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
 * have a fast way to look up unlinked list predecessors, which avoids the
 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
 * rhashtable.
 *
 * Because this is a backref cache, we ignore operational failures since the
 * iunlink code can fall back to the slow bucket walk.  The only errors that
 * should bubble out are for obviously incorrect situations.
 *
 * All users of the backref cache MUST hold the AGI buffer lock to serialize
 * access or have otherwise provided for concurrency control.
 */
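
/*
 * Example (sketch; the inode numbers are hypothetical): if bucket 0
 * points at inode 103 and the on-disk chain is 103 -> 57 -> 29, the
 * cache holds the records
 *
 *	{ iu_agino = 103, iu_next_unlinked = 57 }
 *	{ iu_agino = 57,  iu_next_unlinked = 29 }
 *
 * indexed by iu_next_unlinked, so the predecessor of 29 (i.e. 57) can
 * be found with one hash lookup instead of walking the list from 103.
 */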
/* Capture a "X.next_unlinked = Y" relationship. */
struct xfs_iunlink {
	struct rhash_head	iu_rhash_head;
	xfs_agino_t		iu_agino;		/* X */
	xfs_agino_t		iu_next_unlinked;	/* Y */
};

/* Unlinked list predecessor lookup hashtable construction */
static int
xfs_iunlink_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const xfs_agino_t		*key = arg->key;
	const struct xfs_iunlink	*iu = obj;

	if (iu->iu_next_unlinked != *key)
		return 1;
	return 0;
}

static const struct rhashtable_params	xfs_iunlink_hash_params = {
	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
	.key_len		= sizeof(xfs_agino_t),
	.key_offset		= offsetof(struct xfs_iunlink,
					   iu_next_unlinked),
	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
};
/*
 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
 * relation is found.
 */
static xfs_agino_t
xfs_iunlink_lookup_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_iunlink	*iu;

	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	return iu ? iu->iu_agino : NULLAGINO;
}
/*
 * Take ownership of an iunlink cache entry and insert it into the hash table.
 * If successful, the entry will be owned by the cache; if not, it is freed.
 * Either way, the caller does not own @iu after this call.
 */
static int
xfs_iunlink_insert_backref(
	struct xfs_perag	*pag,
	struct xfs_iunlink	*iu)
{
	int			error;

	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	/*
	 * Fail loudly if there already was an entry because that's a sign of
	 * corruption of in-memory data.  Also fail loudly if we see an error
	 * code we didn't anticipate from the rhashtable code.  Currently we
	 * only anticipate ENOMEM.
	 */
	if (error) {
		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
		kmem_free(iu);
	}
	/*
	 * Absorb any runtime errors that aren't a result of corruption because
	 * this is a cache and we can always fall back to bucket list scanning.
	 */
	if (error != 0 && error != -EEXIST)
		error = 0;
	return error;
}
1874 xfs_iunlink_add_backref(
1875 struct xfs_perag *pag,
1876 xfs_agino_t prev_agino,
1877 xfs_agino_t this_agino)
1879 struct xfs_iunlink *iu;
1881 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
1884 iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
1885 iu->iu_agino = prev_agino;
1886 iu->iu_next_unlinked = this_agino;
1888 return xfs_iunlink_insert_backref(pag, iu);
/*
 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
 * wasn't any such entry then we don't bother.
 */
static int
xfs_iunlink_change_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	xfs_agino_t		next_unlinked)
{
	struct xfs_iunlink	*iu;
	int			error;

	/* Look up the old entry; if there wasn't one then exit. */
	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	if (!iu)
		return 0;

	/*
	 * Remove the entry.  This shouldn't ever return an error, but if we
	 * couldn't remove the old entry we don't want to add it again to the
	 * hash table, and if the entry disappeared on us then someone's
	 * violated the locking rules and we need to fail loudly.  Either way
	 * we cannot remove the inode because internal state is or would have
	 * been corrupt.
	 */
	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	if (error)
		return error;

	/* If there is no new next entry just free our item and return. */
	if (next_unlinked == NULLAGINO) {
		kmem_free(iu);
		return 0;
	}

	/* Update the entry and re-add it to the hash table. */
	iu->iu_next_unlinked = next_unlinked;
	return xfs_iunlink_insert_backref(pag, iu);
}
/* Set up the in-core predecessor structures. */
int
xfs_iunlink_init(
	struct xfs_perag	*pag)
{
	return rhashtable_init(&pag->pagi_unlinked_hash,
			&xfs_iunlink_hash_params);
}

/* Free the in-core predecessor structures. */
static void
xfs_iunlink_free_item(
	void			*ptr,
	void			*arg)
{
	struct xfs_iunlink	*iu = ptr;
	bool			*freed_anything = arg;

	*freed_anything = true;
	kmem_free(iu);
}

void
xfs_iunlink_destroy(
	struct xfs_perag	*pag)
{
	bool			freed_anything = false;

	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
			xfs_iunlink_free_item, &freed_anything);

	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
}
/*
 * Point the AGI unlinked bucket at an inode and log the results.  The caller
 * is responsible for validating the old value.
 */
STATIC int
xfs_iunlink_update_bucket(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		*agibp,
	unsigned int		bucket_index,
	xfs_agino_t		new_agino)
{
	struct xfs_agi		*agi = agibp->b_addr;
	xfs_agino_t		old_value;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));

	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
			old_value, new_agino);

	/*
	 * We should never find the head of the list already set to the value
	 * passed in because either we're adding or removing ourselves from the
	 * head of the list.
	 */
	if (old_value == new_agino) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
	offset = offsetof(struct xfs_agi, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
}
/* Set an on-disk inode's next_unlinked pointer. */
STATIC void
xfs_iunlink_update_dinode(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_buf		*ibp,
	struct xfs_dinode	*dip,
	struct xfs_imap		*imap,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

	trace_xfs_iunlink_update_dinode(mp, agno, agino,
			be32_to_cpu(dip->di_next_unlinked), next_agino);

	dip->di_next_unlinked = cpu_to_be32(next_agino);
	offset = imap->im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	/* need to recalc the inode CRC if appropriate */
	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
}
/* Set an in-core inode's unlinked pointer and return the old value. */
STATIC int
xfs_iunlink_update_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_agnumber_t		agno,
	xfs_agino_t		next_agino,
	xfs_agino_t		*old_next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	xfs_agino_t		old_value;
	int			error;

	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
	if (error)
		return error;

	/* Make sure the old pointer isn't garbage. */
	old_value = be32_to_cpu(dip->di_next_unlinked);
	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	*old_next_agino = old_value;
	if (old_value == next_agino) {
		if (next_agino != NULLAGINO) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
					dip, sizeof(*dip), __this_address);
			error = -EFSCORRUPTED;
		}
		goto out;
	}

	/* Ok, update the new pointer. */
	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
			ibp, dip, &ip->i_imap, next_agino);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}
/*
 * This is called when the inode's link count has gone to 0 or we are creating
 * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
 *
 * We place the on-disk inode on a list in the AGI.  It will be pulled from this
 * list when the inode is freed.
 */
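
/*
 * Sketch of the list manipulation below: inserting agino A into a
 * bucket whose head is H turns
 *
 *	bucket -> H -> ...	into	bucket -> A -> H -> ...
 *
 * i.e. A's next_unlinked pointer is set to H first, then the bucket
 * head is repointed at A, with a backref "H's predecessor is A"
 * recorded in the in-core cache.
 */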
STATIC int
xfs_iunlink(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	xfs_agino_t		next_agino;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	int			error;

	ASSERT(VFS_I(ip)->i_nlink == 0);
	ASSERT(VFS_I(ip)->i_mode != 0);
	trace_xfs_iunlink(ip);

	/* Get the agi buffer first.  It ensures lock ordering on the list. */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;
	agi = agibp->b_addr;

	/*
	 * Get the index into the agi hash table for the list this inode will
	 * go on.  Make sure the pointer isn't garbage and that this inode
	 * isn't already on the list.
	 */
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	if (next_agino == agino ||
	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	if (next_agino != NULLAGINO) {
		xfs_agino_t		old_agino;

		/*
		 * There is already another inode in the bucket, so point this
		 * inode to the current head of the list.
		 */
		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
				&old_agino);
		if (error)
			return error;
		ASSERT(old_agino == NULLAGINO);

		/*
		 * agino has been unlinked, add a backref from the next inode
		 * back to agino.
		 */
		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
		if (error)
			return error;
	}

	/* Point the head of the list to point to this inode. */
	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
}
/* Return the imap, dinode pointer, and buffer for an inode. */
STATIC int
xfs_iunlink_map_ino(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error;

	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
	if (error) {
		xfs_warn(mp, "%s: xfs_imap returned error %d.",
				__func__, error);
		return error;
	}

	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
	if (error) {
		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
		return error;
	}

	return 0;
}
2192 * Walk the unlinked chain from @head_agino until we find the inode that
2193 * points to @target_agino. Return the inode number, map, dinode pointer,
2194 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2196 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2197 * @agino, @imap, @dipp, and @bpp are all output parameters.
2199 * Do not call this function if @target_agino is the head of the list.
2202 xfs_iunlink_map_prev(
2203 struct xfs_trans *tp,
2204 xfs_agnumber_t agno,
2205 xfs_agino_t head_agino,
2206 xfs_agino_t target_agino,
2208 struct xfs_imap *imap,
2209 struct xfs_dinode **dipp,
2210 struct xfs_buf **bpp,
2211 struct xfs_perag *pag)
2213 struct xfs_mount *mp = tp->t_mountp;
2214 xfs_agino_t next_agino;
2217 ASSERT(head_agino != target_agino);
2220 /* See if our backref cache can find it faster. */
2221 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2222 if (*agino != NULLAGINO) {
2223 error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2227 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2231 * If we get here the cache contents were corrupt, so drop the
2232 * buffer and fall back to walking the bucket list.
2234 xfs_trans_brelse(tp, *bpp);
2239 trace_xfs_iunlink_map_prev_fallback(mp, agno);
2241 /* Otherwise, walk the entire bucket until we find it. */
2242 next_agino = head_agino;
2243 while (next_agino != target_agino) {
2244 xfs_agino_t unlinked_agino;
2247 xfs_trans_brelse(tp, *bpp);
2249 *agino = next_agino;
2250 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2251 bpp);
2255 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2257 * Make sure this pointer is valid and isn't an obvious infinite loop.
2260 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2261 next_agino == unlinked_agino) {
2262 XFS_CORRUPTION_ERROR(__func__,
2263 XFS_ERRLEVEL_LOW, mp,
2264 *dipp, sizeof(**dipp));
2265 error = -EFSCORRUPTED;
2268 next_agino = unlinked_agino;
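/*
 * Worked example of the walk above (illustrative agino values, cold backref
 * cache): given bucket head 900 -> 523 -> 131 -> NULLAGINO and
 * target_agino == 131, we map 900 first and see that its di_next_unlinked is
 * 523 rather than 131, so we release that cluster buffer and map 523; its
 * di_next_unlinked is 131, so the loop exits with 523 returned as the
 * predecessor.
 */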
2275 * Pull the on-disk inode from the AGI unlinked list.
2279 struct xfs_trans *tp,
2280 struct xfs_inode *ip)
2282 struct xfs_mount *mp = tp->t_mountp;
2283 struct xfs_agi *agi;
2284 struct xfs_buf *agibp;
2285 struct xfs_buf *last_ibp;
2286 struct xfs_dinode *last_dip = NULL;
2287 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2288 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2289 xfs_agino_t next_agino;
2290 xfs_agino_t head_agino;
2291 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2294 trace_xfs_iunlink_remove(ip);
2296 /* Get the agi buffer first. It ensures lock ordering on the list. */
2297 error = xfs_read_agi(mp, tp, agno, &agibp);
2300 agi = agibp->b_addr;
2303 * Get the index into the agi hash table for the list this inode will
2304 * go on. Make sure the head pointer isn't garbage.
2306 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2307 if (!xfs_verify_agino(mp, agno, head_agino)) {
2308 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2309 agi, sizeof(*agi));
2310 return -EFSCORRUPTED;
2314 * Set our inode's next_unlinked pointer to NULL and then return
2315 * the old pointer value so that we can update whatever was previous
2316 * to us in the list to point to whatever was next in the list.
2318 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2323 * If there was a backref pointing from the next inode back to this
2324 * one, remove it because we've removed this inode from the list.
2326 * Later, if this inode was in the middle of the list we'll update
2327 * this inode's backref to point from the next inode.
2329 if (next_agino != NULLAGINO) {
2330 error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
2331 NULLAGINO);
2336 if (head_agino != agino) {
2337 struct xfs_imap imap;
2338 xfs_agino_t prev_agino;
2340 /* We need to search the list for the inode being freed. */
2341 error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2342 &prev_agino, &imap, &last_dip, &last_ibp,
2343 agibp->b_pag);
2347 /* Point the previous inode on the list to the next inode. */
2348 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2349 last_dip, &imap, next_agino);
2352 * Now we deal with the backref for this inode. If this inode
2353 * pointed at a real inode, change the backref that pointed to
2354 * us to point to our old next. If this inode was the end of
2355 * the list, delete the backref that pointed to us. Note that
2356 * change_backref takes care of deleting the backref if
2357 * next_agino is NULLAGINO.
2359 return xfs_iunlink_change_backref(agibp->b_pag, agino,
2360 next_agino);
2363 /* Point the head of the list to the next unlinked inode. */
2364 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2365 next_agino);
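/*
 * Taken together, the removal above is the classic singly linked list delete
 * keyed by agino. A minimal model, reusing the illustrative model_agi type
 * sketched after xfs_iunlink() and with hypothetical
 * model_get_next()/model_set_next() helpers standing in for reading and
 * writing di_next_unlinked through the cluster buffer (the real code also
 * maintains the backref cache so the predecessor walk is usually avoided):
 */
#if 0	/* illustrative sketch only, not built */
static void
model_iunlink_remove(struct model_agi *agi, uint32_t agino)
{
	unsigned short	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
	uint32_t	next = model_get_next(agino);	/* our old next pointer */
	uint32_t	prev;

	model_set_next(agino, NULLAGINO);		/* unhook ourselves */
	if (agi->unlinked[bucket] == agino) {
		/* we were the head; repoint the bucket at our old next */
		agi->unlinked[bucket] = next;
		return;
	}
	/* otherwise walk from the head to find our predecessor */
	for (prev = agi->unlinked[bucket]; model_get_next(prev) != agino; )
		prev = model_get_next(prev);
	model_set_next(prev, next);
}
#endif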
2369 * Look up the inode number specified and if it is not already marked XFS_ISTALE
2370 * mark it stale. We should only find clean inodes in this lookup that aren't already stale.
2374 xfs_ifree_mark_inode_stale(
2376 struct xfs_inode *free_ip,
2379 struct xfs_mount *mp = bp->b_mount;
2380 struct xfs_perag *pag = bp->b_pag;
2381 struct xfs_inode_log_item *iip;
2382 struct xfs_inode *ip;
2386 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2388 /* Inode not in memory, nothing to do */
2395 * Because this is an RCU-protected lookup, we could find a recently
2396 * freed or even reallocated inode during the lookup. We need to check
2397 * under the i_flags_lock for a valid inode here. Skip it if it is not
2398 * valid, the wrong inode or stale.
2400 spin_lock(&ip->i_flags_lock);
2401 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2402 goto out_iflags_unlock;
2405 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2406 * other inodes that we did not find in the list attached to the buffer
2407 * and are not already marked stale. If we can't lock it, back off and retry.
2410 if (ip != free_ip) {
2411 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2412 spin_unlock(&ip->i_flags_lock);
2418 ip->i_flags |= XFS_ISTALE;
2421 * If the inode is flushing, it is already attached to the buffer. All
2422 * we need to do here is mark the inode stale so buffer IO completion
2423 * will remove it from the AIL.
2426 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2427 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2428 ASSERT(iip->ili_last_fields);
2433 * Inodes not attached to the buffer can be released immediately.
2434 * Everything else has to go through xfs_iflush_abort() on journal
2435 * commit as the flock synchronises removal of the inode from the
2436 * cluster buffer against inode reclaim.
2438 if (!iip || list_empty(&iip->ili_item.li_bio_list))
2441 __xfs_iflags_set(ip, XFS_IFLUSHING);
2442 spin_unlock(&ip->i_flags_lock);
2445 /* we have a dirty inode in memory that has not yet been flushed. */
2446 spin_lock(&iip->ili_lock);
2447 iip->ili_last_fields = iip->ili_fields;
2448 iip->ili_fields = 0;
2449 iip->ili_fsync_fields = 0;
2450 spin_unlock(&iip->ili_lock);
2451 ASSERT(iip->ili_last_fields);
2454 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2459 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2461 spin_unlock(&ip->i_flags_lock);
2466 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2467 * inodes that are in memory - they all must be marked stale and attached to
2468 * the cluster buffer.
2472 struct xfs_inode *free_ip,
2473 struct xfs_trans *tp,
2474 struct xfs_icluster *xic)
2476 struct xfs_mount *mp = free_ip->i_mount;
2477 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2480 xfs_ino_t inum = xic->first_ino;
2486 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2488 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2490 * The allocation bitmap tells us which inodes of the chunk were
2491 * physically allocated. Skip the cluster if an inode falls into a sparse region.
2494 ioffset = inum - xic->first_ino;
2495 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2496 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2497 continue;
2500 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2501 XFS_INO_TO_AGBNO(mp, inum));
2504 * We obtain and lock the backing buffer first in the process
2505 * here to ensure dirty inodes attached to the buffer remain in
2506 * the flushing state while we mark them stale.
2508 * If we scan the in-memory inodes first, then buffer IO can
2509 * complete before we get a lock on it, and hence we may fail
2510 * to mark all the active inodes on the buffer stale.
2512 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2513 mp->m_bsize * igeo->blocks_per_cluster,
2514 XBF_UNMAPPED, &bp);
2519 * This buffer may not have been correctly initialised as we
2520 * didn't read it from disk. That's not important because we are
2521 * only using it to mark the buffer as stale in the log, and to
2522 * attach stale cached inodes on it. That means it will never be
2523 * dispatched for IO. If it is, we want to know about it, and we
2524 * want it to fail. We can achieve this by adding a write
2525 * verifier to the buffer.
2527 bp->b_ops = &xfs_inode_buf_ops;
2530 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2531 * too. This requires lookups, and will skip inodes that we've
2532 * already marked XFS_ISTALE.
2534 for (i = 0; i < igeo->inodes_per_cluster; i++)
2535 xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
2537 xfs_trans_stale_inode_buf(tp, bp);
2538 xfs_trans_binval(tp, bp);
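/*
 * Worked example of the geometry above (illustrative numbers, not a
 * requirement): with 4k blocks, 512 byte inodes and 8k inode clusters, a 64
 * inode chunk spans ialloc_blks = 8 blocks with blocks_per_cluster = 2 and
 * inodes_per_cluster = 16. The loop above therefore stales
 * nbufs = 8 / 2 = 4 cluster buffers, stepping inum by 16 on each iteration.
 */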
2544 * This is called to return an inode to the inode free list.
2545 * The inode should already be truncated to 0 length and have
2546 * no pages associated with it. This routine also assumes that
2547 * the inode is already a part of the transaction.
2549 * The on-disk copy of the inode will have been added to the list
2550 * of unlinked inodes in the AGI. We need to remove the inode from
2551 * that list atomically with respect to freeing it here.
2555 struct xfs_trans *tp,
2556 struct xfs_inode *ip)
2559 struct xfs_icluster xic = { 0 };
2560 struct xfs_inode_log_item *iip = ip->i_itemp;
2562 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2563 ASSERT(VFS_I(ip)->i_nlink == 0);
2564 ASSERT(ip->i_df.if_nextents == 0);
2565 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2566 ASSERT(ip->i_d.di_nblocks == 0);
2569 * Pull the on-disk inode from the AGI unlinked list.
2571 error = xfs_iunlink_remove(tp, ip);
2575 error = xfs_difree(tp, ip->i_ino, &xic);
2580 * Free any local-format data sitting around before we reset the
2581 * data fork to extents format. Note that the attr fork data has
2582 * already been freed by xfs_attr_inactive.
2584 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2585 kmem_free(ip->i_df.if_u1.if_data);
2586 ip->i_df.if_u1.if_data = NULL;
2587 ip->i_df.if_bytes = 0;
2590 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2591 ip->i_d.di_flags = 0;
2592 ip->i_d.di_flags2 = ip->i_mount->m_ino_geo.new_diflags2;
2593 ip->i_d.di_dmevmask = 0;
2594 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2595 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2597 /* Don't attempt to replay owner changes for a deleted inode */
2598 spin_lock(&iip->ili_lock);
2599 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2600 spin_unlock(&iip->ili_lock);
2603 * Bump the generation count so no one will be confused
2604 * by reincarnations of this inode.
2606 VFS_I(ip)->i_generation++;
2607 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2610 error = xfs_ifree_cluster(ip, tp, &xic);
2616 * This is called to unpin an inode. The caller must have the inode locked
2617 * in at least shared mode so that the buffer cannot be subsequently pinned
2618 * once someone is waiting for it to be unpinned.
2622 struct xfs_inode *ip)
2624 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2626 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2628 /* Give the log a push to start the unpinning I/O */
2629 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2635 struct xfs_inode *ip)
2637 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2638 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
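/*
 * This is the standard open-coded bit-waitqueue sleep: re-check the pin
 * count after prepare_to_wait() so that a wakeup issued between the check
 * and the sleep cannot be lost; the unpin side issues the matching
 * wake_up_bit() on __XFS_IPINNED_BIT when the count drops to zero.
 */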
2643 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2644 if (xfs_ipincount(ip))
2645 io_schedule();
2646 } while (xfs_ipincount(ip));
2647 finish_wait(wq, &wait.wq_entry);
2652 struct xfs_inode *ip)
2654 if (xfs_ipincount(ip))
2655 __xfs_iunpin_wait(ip);
2659 * Removing an inode from the namespace involves removing the directory entry
2660 * and dropping the link count on the inode. Removing the directory entry can
2661 * result in locking an AGF (directory blocks were freed) and removing a link
2662 * count can result in placing the inode on an unlinked list which results in locking an AGI.
2665 * The big problem here is that we have an ordering constraint on AGF and AGI
2666 * locking - inode allocation locks the AGI, then can allocate a new extent for
2667 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2668 * removes the inode from the unlinked list, requiring that we lock the AGI
2669 * first, and then freeing the inode can result in an inode chunk being freed
2670 * and hence freeing disk space requiring that we lock an AGF.
2672 * Hence the ordering that is imposed by other parts of the code is AGI before
2673 * AGF. This means we cannot remove the directory entry before we drop the inode
2674 * reference count and put it on the unlinked list as this results in a lock
2675 * order of AGF then AGI, and this can deadlock against inode allocation and
2676 * freeing. Therefore we must drop the link counts before we remove the directory entry.
2679 * This is still safe from a transactional point of view - it is not until we
2680 * get to xfs_defer_finish() that we have the possibility of multiple
2681 * transactions in this operation. Hence as long as we remove the directory
2682 * entry and drop the link count in the first transaction of the remove
2683 * operation, there are no transactional constraints on the ordering here.
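/*
 * Concretely, the ABBA deadlock the above ordering avoids looks like this
 * (task A removing an entry the "wrong" way round, task B allocating an
 * inode):
 *
 *	A: lock AGF (free dir block)	B: lock AGI (allocate inode)
 *	A: lock AGI (unlinked list)	B: lock AGF (new inode extent)
 *
 * Each task then waits forever on the AG header the other holds. Dropping
 * the link count (AGI) before removing the directory entry (AGF) keeps every
 * path in AGI -> AGF order.
 */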
2688 struct xfs_name *name,
2691 xfs_mount_t *mp = dp->i_mount;
2692 xfs_trans_t *tp = NULL;
2693 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2697 trace_xfs_remove(dp, name);
2699 if (XFS_FORCED_SHUTDOWN(mp))
2700 return -EIO;
2702 error = xfs_qm_dqattach(dp);
2706 error = xfs_qm_dqattach(ip);
2711 * We try to get the real space reservation first,
2712 * allowing for directory btree deletion(s) implying
2713 * possible bmap insert(s). If we can't get the space
2714 * reservation then we use 0 instead, and avoid the bmap
2715 * btree insert(s) in the directory code: if a bmap
2716 * insert would otherwise be needed, the directory code
2717 * trims the LAST block from the directory instead.
2719 resblks = XFS_REMOVE_SPACE_RES(mp);
2720 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2721 if (error == -ENOSPC) {
2723 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2724 &tp);
2727 ASSERT(error != -ENOSPC);
2731 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2733 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2734 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2737 * If we're removing a directory, perform some additional validation.
2740 ASSERT(VFS_I(ip)->i_nlink >= 2);
2741 if (VFS_I(ip)->i_nlink != 2) {
2742 error = -ENOTEMPTY;
2743 goto out_trans_cancel;
2745 if (!xfs_dir_isempty(ip)) {
2746 error = -EEXIST;
2747 goto out_trans_cancel;
2750 /* Drop the link from ip's "..". */
2751 error = xfs_droplink(tp, dp);
2753 goto out_trans_cancel;
2755 /* Drop the "." link from ip to self. */
2756 error = xfs_droplink(tp, ip);
2758 goto out_trans_cancel;
2761 * When removing a non-directory we need to log the parent
2762 * inode here. For a directory this is done implicitly
2763 * by the xfs_droplink call for the ".." entry.
2765 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2767 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2769 /* Drop the link from dp to ip. */
2770 error = xfs_droplink(tp, ip);
2772 goto out_trans_cancel;
2774 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2775 if (error) {
2776 ASSERT(error != -ENOENT);
2777 goto out_trans_cancel;
2781 * If this is a synchronous mount, make sure that the
2782 * remove transaction goes to disk before returning to the user.
2785 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2786 xfs_trans_set_sync(tp);
2788 error = xfs_trans_commit(tp);
2792 if (is_dir && xfs_inode_is_filestream(ip))
2793 xfs_filestream_deassociate(ip);
2798 xfs_trans_cancel(tp);
2804 * Enter all inodes for a rename transaction into a sorted array.
2806 #define __XFS_SORT_INODES 5
2808 xfs_sort_for_rename(
2809 struct xfs_inode *dp1, /* in: old (source) directory inode */
2810 struct xfs_inode *dp2, /* in: new (target) directory inode */
2811 struct xfs_inode *ip1, /* in: inode of old entry */
2812 struct xfs_inode *ip2, /* in: inode of new entry */
2813 struct xfs_inode *wip, /* in: whiteout inode */
2814 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2815 int *num_inodes) /* in/out: inodes in array */
2819 ASSERT(*num_inodes == __XFS_SORT_INODES);
2820 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2823 * i_tab contains a list of pointers to inodes. We initialize
2824 * the table here and sort it below; we then use it to
2825 * order the acquisition of the inode locks.
2827 * Note that the table may contain duplicates (e.g., dp1 == dp2).
2840 * Sort the elements via bubble sort. (Remember, there are at
2841 * most 5 elements to sort, so this is adequate.)
2843 for (i = 0; i < *num_inodes; i++) {
2844 for (j = 1; j < *num_inodes; j++) {
2845 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2846 struct xfs_inode *temp = i_tab[j];
2847 i_tab[j] = i_tab[j-1];
2848 i_tab[j-1] = temp;
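/*
 * For example (hypothetical inode numbers): dp1->i_ino == 523,
 * dp2->i_ino == 131 and ip1->i_ino == 87 with no ip2/wip sorts to
 * i_tab = { 87, 131, 523 } and *num_inodes == 3, so callers always take the
 * locks in ascending inode number order no matter which role each inode
 * plays in the rename.
 */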
2856 struct xfs_trans *tp)
2859 * If this is a synchronous mount, make sure that the rename transaction
2860 * goes to disk before returning to the user.
2862 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2863 xfs_trans_set_sync(tp);
2865 return xfs_trans_commit(tp);
2869 * xfs_cross_rename()
2871 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() system call.
2875 struct xfs_trans *tp,
2876 struct xfs_inode *dp1,
2877 struct xfs_name *name1,
2878 struct xfs_inode *ip1,
2879 struct xfs_inode *dp2,
2880 struct xfs_name *name2,
2881 struct xfs_inode *ip2,
2889 /* Swap inode number for dirent in first parent */
2890 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2892 goto out_trans_abort;
2894 /* Swap inode number for dirent in second parent */
2895 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2897 goto out_trans_abort;
2900 * If we're renaming one or more directories across different parents,
2901 * update the respective ".." entries (and link counts) to match the new
2905 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2907 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2908 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2909 dp1->i_ino, spaceres);
2911 goto out_trans_abort;
2913 /* transfer ip2 ".." reference to dp1 */
2914 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2915 error = xfs_droplink(tp, dp2);
2917 goto out_trans_abort;
2918 xfs_bumplink(tp, dp1);
2922 * Although ip1 isn't changed here, userspace needs to be
2923 * warned about the change, so that applications relying on
2924 * it (like backup tools) are properly notified of it.
2927 ip1_flags |= XFS_ICHGTIME_CHG;
2928 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2931 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2932 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2933 dp2->i_ino, spaceres);
2935 goto out_trans_abort;
2937 /* transfer ip1 ".." reference to dp2 */
2938 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2939 error = xfs_droplink(tp, dp1);
2941 goto out_trans_abort;
2942 xfs_bumplink(tp, dp2);
2946 * Although ip2 isn't changed here, userspace needs to be
2947 * warned about the change, so that applications relying on
2948 * it (like backup tools) are properly notified of it.
2951 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2952 ip2_flags |= XFS_ICHGTIME_CHG;
2957 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2958 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2961 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2962 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2965 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2966 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2968 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2969 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2970 return xfs_finish_rename(tp);
2973 xfs_trans_cancel(tp);
2978 * xfs_rename_alloc_whiteout()
2980 * Return a referenced, unlinked, unlocked inode that can be used as a
2981 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2982 * crash between allocating the inode and linking it into the rename transaction,
2983 * recovery will free the inode and we won't leak it.
2986 xfs_rename_alloc_whiteout(
2987 struct user_namespace *mnt_userns,
2988 struct xfs_inode *dp,
2989 struct xfs_inode **wip)
2991 struct xfs_inode *tmpfile;
2994 error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
2995 &tmpfile);
3000 * Prepare the tmpfile inode as if it were created through the VFS.
3001 * Complete the inode setup and flag it as linkable. nlink is already
3002 * zero, so we can skip the drop_nlink.
3004 xfs_setup_iops(tmpfile);
3005 xfs_finish_inode_setup(tmpfile);
3006 VFS_I(tmpfile)->i_state |= I_LINKABLE;
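/*
 * For reference, the whiteout allocated above ultimately services a
 * userspace request like the sketch below (assuming glibc 2.28+, which
 * exposes renameat2() and the RENAME_* flags under _GNU_SOURCE; overlayfs is
 * the main in-kernel user of RENAME_WHITEOUT):
 */
#if 0	/* illustrative userspace sketch, not built */
#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>	/* renameat2(), RENAME_WHITEOUT, perror() */

int main(void)
{
	/* move "a" over "b", leaving a char-device whiteout at "a" */
	if (renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_WHITEOUT) != 0)
		perror("renameat2");
	return 0;
}
#endif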
3017 struct user_namespace *mnt_userns,
3018 struct xfs_inode *src_dp,
3019 struct xfs_name *src_name,
3020 struct xfs_inode *src_ip,
3021 struct xfs_inode *target_dp,
3022 struct xfs_name *target_name,
3023 struct xfs_inode *target_ip,
3026 struct xfs_mount *mp = src_dp->i_mount;
3027 struct xfs_trans *tp;
3028 struct xfs_inode *wip = NULL; /* whiteout inode */
3029 struct xfs_inode *inodes[__XFS_SORT_INODES];
3031 int num_inodes = __XFS_SORT_INODES;
3032 bool new_parent = (src_dp != target_dp);
3033 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3037 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3039 if ((flags & RENAME_EXCHANGE) && !target_ip)
3040 return -EINVAL;
3043 * If we are doing a whiteout operation, allocate the whiteout inode
3044 * we will be placing at the target and ensure the type is set appropriately.
3047 if (flags & RENAME_WHITEOUT) {
3048 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3049 error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
3053 /* setup target dirent info as whiteout */
3054 src_name->type = XFS_DIR3_FT_CHRDEV;
3057 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3058 inodes, &num_inodes);
3060 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3061 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3062 if (error == -ENOSPC) {
3064 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3065 &tp);
3068 goto out_release_wip;
3071 * Attach the dquots to the inodes
3073 error = xfs_qm_vop_rename_dqattach(inodes);
3075 goto out_trans_cancel;
3078 * Lock all the participating inodes. Depending upon whether
3079 * the target_name exists in the target directory, and
3080 * whether the target directory is the same as the source
3081 * directory, we can lock from 2 to 4 inodes.
3083 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3086 * Join all the inodes to the transaction. From this point on,
3087 * we can rely on either trans_commit or trans_cancel to unlock them.
3090 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3092 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3093 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3095 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3097 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3100 * If we are using project inheritance, we only allow renames
3101 * into our tree when the project IDs are the same; else the
3102 * tree quota mechanism would be circumvented.
3104 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3105 target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3106 error = -EXDEV;
3107 goto out_trans_cancel;
3110 /* RENAME_EXCHANGE is unique from here on. */
3111 if (flags & RENAME_EXCHANGE)
3112 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3113 target_dp, target_name, target_ip,
3114 inodes, num_inodes, spaceres);
3117 * Check for expected errors before we dirty the transaction
3118 * so we can return an error without a transaction abort.
3120 * Extent count overflow check:
3122 * From the perspective of src_dp, a rename operation is essentially a
3123 * directory entry remove operation. Hence the only place where we check
3124 * for extent count overflow for src_dp is in
3125 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
3126 * -ENOSPC when it detects a possible extent count overflow and in
3127 * response, the higher layers of directory handling code do the following:
3129 * 1. Data/Free blocks: XFS lets these blocks linger until a
3130 * future remove operation removes them.
3131 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
3132 * Leaf space and unmaps the last block.
3134 * For target_dp, there are two cases depending on whether the
3135 * destination directory entry exists or not.
3137 * When the destination directory entry does not exist (i.e. target_ip ==
3138 * NULL), the extent count overflow check is performed only when the
3139 * transaction has a non-zero sized space reservation associated with it. With a
3140 * zero-sized space reservation, XFS allows a rename operation to
3141 * continue only when the directory has sufficient free space in its
3142 * data/leaf/free space blocks to hold the new entry.
3144 * When the destination directory entry exists (i.e. target_ip != NULL), all
3145 * we need to do is change the inode number associated with the already
3146 * existing entry. Hence there is no need to perform an extent count
3149 if (target_ip == NULL) {
3151 * If there's no space reservation, check that the entry will
3152 * fit before actually inserting it.
3155 error = xfs_dir_canenter(tp, target_dp, target_name);
3157 goto out_trans_cancel;
3159 error = xfs_iext_count_may_overflow(target_dp,
3160 XFS_DATA_FORK,
3161 XFS_IEXT_DIR_MANIP_CNT(mp));
3163 goto out_trans_cancel;
3167 * If the target exists and it's a directory, check whether
3168 * it can be destroyed.
3170 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3171 (!xfs_dir_isempty(target_ip) ||
3172 (VFS_I(target_ip)->i_nlink > 2))) {
3173 error = -EEXIST;
3174 goto out_trans_cancel;
3179 * Lock the AGI buffers we need to handle bumping the nlink of the
3180 * whiteout inode off the unlinked list and to handle dropping the
3181 * nlink of the target inode. Per locking order rules, do this in
3182 * increasing AG order and before directory block allocation tries to
3183 * grab AGFs because we grab AGIs before AGFs.
3185 * The (vfs) caller must ensure that if src is a directory then
3186 * target_ip is either null or an empty directory.
3188 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3189 if (inodes[i] == wip ||
3190 (inodes[i] == target_ip &&
3191 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3193 xfs_agnumber_t agno;
3195 agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
3196 error = xfs_read_agi(mp, tp, agno, &bp);
3198 goto out_trans_cancel;
3203 * Directory entry creation below may acquire the AGF. Remove
3204 * the whiteout from the unlinked list first to preserve correct
3205 * AGI/AGF locking order. This dirties the transaction so failures
3206 * after this point will abort and log recovery will clean up the mess.
3209 * For whiteouts, we need to bump the link count on the whiteout
3210 * inode. After this point, we have a real link, clear the tmpfile
3211 * state flag from the inode so it doesn't accidentally get misused in the future.
3215 ASSERT(VFS_I(wip)->i_nlink == 0);
3216 error = xfs_iunlink_remove(tp, wip);
3218 goto out_trans_cancel;
3220 xfs_bumplink(tp, wip);
3221 VFS_I(wip)->i_state &= ~I_LINKABLE;
3225 * Set up the target.
3227 if (target_ip == NULL) {
3229 * If target does not exist and the rename crosses
3230 * directories, adjust the target directory link count
3231 * to account for the ".." reference from the new entry.
3233 error = xfs_dir_createname(tp, target_dp, target_name,
3234 src_ip->i_ino, spaceres);
3236 goto out_trans_cancel;
3238 xfs_trans_ichgtime(tp, target_dp,
3239 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3241 if (new_parent && src_is_directory) {
3242 xfs_bumplink(tp, target_dp);
3244 } else { /* target_ip != NULL */
3246 * Link the source inode under the target name.
3247 * If the source inode is a directory and we are moving
3248 * it across directories, its ".." entry will be
3249 * inconsistent until we replace that down below.
3251 * In case there is already an entry with the same
3252 * name at the destination directory, remove it first.
3254 error = xfs_dir_replace(tp, target_dp, target_name,
3255 src_ip->i_ino, spaceres);
3257 goto out_trans_cancel;
3259 xfs_trans_ichgtime(tp, target_dp,
3260 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3263 * Decrement the link count on the target since the target
3264 * dir no longer points to it.
3266 error = xfs_droplink(tp, target_ip);
3268 goto out_trans_cancel;
3270 if (src_is_directory) {
3272 * Drop the link from the old "." entry.
3274 error = xfs_droplink(tp, target_ip);
3276 goto out_trans_cancel;
3278 } /* target_ip != NULL */
3281 * Remove the source.
3283 if (new_parent && src_is_directory) {
3285 * Rewrite the ".." entry to point to the new directory.
3288 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3289 target_dp->i_ino, spaceres);
3290 ASSERT(error != -EEXIST);
3292 goto out_trans_cancel;
3296 * We always want to hit the ctime on the source inode.
3298 * This isn't strictly required by the standards since the source
3299 * inode isn't really being changed, but old unix file systems did
3300 * it and some incremental backup programs won't work without it.
3302 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3303 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3306 * Adjust the link count on src_dp. This is necessary when
3307 * renaming a directory, either within one parent when
3308 * the target existed, or across two parent directories.
3310 if (src_is_directory && (new_parent || target_ip != NULL)) {
3313 * Decrement link count on src_directory since the
3314 * entry that's moved no longer points to it.
3316 error = xfs_droplink(tp, src_dp);
3318 goto out_trans_cancel;
3322 * For whiteouts, we only need to update the source dirent with the
3323 * inode number of the whiteout inode rather than removing it
3327 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3331 * NOTE: We don't need to check for extent count overflow here
3332 * because the dir remove name code will leave the dir block in
3333 * place if the extent count would overflow.
3335 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3336 spaceres);
3340 goto out_trans_cancel;
3342 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3343 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3345 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3347 error = xfs_finish_rename(tp);
3353 xfs_trans_cancel(tp);
3362 struct xfs_inode *ip,
3365 struct xfs_inode_log_item *iip = ip->i_itemp;
3366 struct xfs_dinode *dip;
3367 struct xfs_mount *mp = ip->i_mount;
3370 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3371 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3372 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3373 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3374 ASSERT(iip->ili_item.li_buf == bp);
3376 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3379 * We don't flush the inode if any of the following checks fail, but we
3380 * do still update the log item and attach to the backing buffer as if
3381 * the flush happened. This is a formality to facilitate predictable
3382 * error handling as the caller will shutdown and fail the buffer.
3384 error = -EFSCORRUPTED;
3385 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3386 mp, XFS_ERRTAG_IFLUSH_1)) {
3387 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3388 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3389 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3392 if (S_ISREG(VFS_I(ip)->i_mode)) {
3393 if (XFS_TEST_ERROR(
3394 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3395 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3396 mp, XFS_ERRTAG_IFLUSH_3)) {
3397 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3398 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3399 __func__, ip->i_ino, ip);
3402 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3403 if (XFS_TEST_ERROR(
3404 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3405 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3406 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3407 mp, XFS_ERRTAG_IFLUSH_4)) {
3408 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3409 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3410 __func__, ip->i_ino, ip);
3414 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3415 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3416 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3417 "%s: detected corrupt incore inode %Lu, "
3418 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3419 __func__, ip->i_ino,
3420 ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3421 ip->i_d.di_nblocks, ip);
3424 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3425 mp, XFS_ERRTAG_IFLUSH_6)) {
3426 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3427 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3428 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3433 * Inode item log recovery for v2 inodes is dependent on the
3434 * di_flushiter count for correct sequencing. We bump the flush
3435 * iteration count so we can detect flushes which postdate a log record
3436 * during recovery. This is redundant as we now log every change and
3437 * hence this can't happen, but we still need to do it to ensure
3438 * backwards compatibility with old kernels that predate logging all inode changes.
3441 if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3442 ip->i_d.di_flushiter++;
3445 * If there are inline format data / attr forks attached to this inode,
3446 * make sure they are not corrupt.
3448 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3449 xfs_ifork_verify_local_data(ip))
3450 goto flush_out;
3451 if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3452 xfs_ifork_verify_local_attr(ip))
3453 goto flush_out;
3456 * Copy the dirty parts of the inode into the on-disk inode. We always
3457 * copy out the core of the inode, because if the inode is dirty at all, the core must be.
3460 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3462 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3463 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3464 ip->i_d.di_flushiter = 0;
3466 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3467 if (XFS_IFORK_Q(ip))
3468 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3471 * We've recorded everything logged in the inode, so we'd like to clear
3472 * the ili_fields bits so we don't log and flush things unnecessarily.
3473 * However, we can't stop logging all this information until the data
3474 * we've copied into the disk buffer is written to disk. If we did we
3475 * might overwrite the copy of the inode in the log with all the data
3476 * after re-logging only part of it, and in the face of a crash we
3477 * wouldn't have all the data we need to recover.
3479 * What we do is move the bits to the ili_last_fields field. When
3480 * logging the inode, these bits are moved back to the ili_fields field.
3481 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3482 * we know that the information those bits represent is permanently on
3483 * disk. As long as the flush completes before the inode is logged
3484 * again, then both ili_fields and ili_last_fields will be cleared.
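/*
 * An illustrative timeline of why the bits must survive in ili_last_fields
 * until the buffer I/O completes:
 *
 *	flush:    ili_last_fields = ili_fields; ili_fields = 0
 *	relog:    ili_fields |= ili_last_fields   (inode dirtied again)
 *	I/O done: ili_last_fields = 0             (data now on disk)
 *
 * If the relog step found ili_last_fields already cleared, the log would
 * carry only the latest partial change, and a crash before the flush reached
 * disk could not recover the rest.
 */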
3488 spin_lock(&iip->ili_lock);
3489 iip->ili_last_fields = iip->ili_fields;
3490 iip->ili_fields = 0;
3491 iip->ili_fsync_fields = 0;
3492 spin_unlock(&iip->ili_lock);
3495 * Store the current LSN of the inode so that we can tell whether the
3496 * item has moved in the AIL from xfs_buf_inode_iodone().
3498 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3499 &iip->ili_item.li_lsn);
3501 /* generate the checksum. */
3502 xfs_dinode_calc_crc(mp, dip);
3507 * Non-blocking flush of dirty inode metadata into the backing buffer.
3509 * The caller must have a reference to the inode and hold the cluster buffer
3510 * locked. The function will walk across all the inodes on the cluster buffer it
3511 * can find and lock without blocking, and flush them to the cluster buffer.
3513 * On successful flushing of at least one inode, the caller must write out the
3514 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3515 * the caller needs to release the buffer. On failure, the filesystem will be
3516 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED returned.
3523 struct xfs_mount *mp = bp->b_mount;
3524 struct xfs_log_item *lip, *n;
3525 struct xfs_inode *ip;
3526 struct xfs_inode_log_item *iip;
3531 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3532 * can remove itself from the list.
3534 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3535 iip = (struct xfs_inode_log_item *)lip;
3536 ip = iip->ili_inode;
3539 * Quick and dirty check to avoid locks if possible.
3541 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3542 continue;
3543 if (xfs_ipincount(ip))
3544 continue;
3547 * The inode is still attached to the buffer, which means it is
3548 * dirty but reclaim might try to grab it. Check carefully for
3549 * that, and grab the ilock while still holding the i_flags_lock
3550 * to guarantee reclaim will not be able to reclaim this inode
3551 * once we drop the i_flags_lock.
3553 spin_lock(&ip->i_flags_lock);
3554 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3555 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3556 spin_unlock(&ip->i_flags_lock);
3561 * ILOCK will pin the inode against reclaim and prevent
3562 * concurrent transactions modifying the inode while we are
3563 * flushing the inode. If we get the lock, set the flushing
3564 * state before we drop the i_flags_lock.
3566 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3567 spin_unlock(&ip->i_flags_lock);
3570 __xfs_iflags_set(ip, XFS_IFLUSHING);
3571 spin_unlock(&ip->i_flags_lock);
3574 * Abort flushing this inode if we are shut down because the
3575 * inode may not currently be in the AIL. This can occur when
3576 * log I/O failure unpins the inode without inserting into the
3577 * AIL, leaving a dirty/unpinned inode attached to the buffer
3578 * that otherwise looks like it should be flushed.
3580 if (XFS_FORCED_SHUTDOWN(mp)) {
3581 xfs_iunpin_wait(ip);
3582 xfs_iflush_abort(ip);
3583 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3588 /* don't block waiting on a log force to unpin dirty inodes */
3589 if (xfs_ipincount(ip)) {
3590 xfs_iflags_clear(ip, XFS_IFLUSHING);
3591 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3595 if (!xfs_inode_clean(ip))
3596 error = xfs_iflush(ip, bp);
3598 xfs_iflags_clear(ip, XFS_IFLUSHING);
3599 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3606 bp->b_flags |= XBF_ASYNC;
3607 xfs_buf_ioend_fail(bp);
3608 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3615 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3616 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3621 /* Release an inode. */
3624 struct xfs_inode *ip)
3626 trace_xfs_irele(ip, _RET_IP_);
3631 * Ensure all committed transactions touching the inode are written to the log.
3634 xfs_log_force_inode(
3635 struct xfs_inode *ip)
3639 xfs_ilock(ip, XFS_ILOCK_SHARED);
3640 if (xfs_ipincount(ip))
3641 lsn = ip->i_itemp->ili_last_lsn;
3642 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3646 return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
3650 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3651 * abide by the VFS locking order (lowest pointer value goes first) and to break the
3652 * layout leases before proceeding. The loop is needed because we cannot call
3653 * the blocking break_layout() with the iolocks held, and therefore have to
3654 * back out both locks.
3657 xfs_iolock_two_inodes_and_break_layout(
3667 /* Wait to break both inodes' layouts before we start locking. */
3668 error = break_layout(src, true);
3672 error = break_layout(dest, true);
3677 /* Lock one inode and make sure nobody got in and leased it. */
3679 error = break_layout(src, false);
3682 if (error == -EWOULDBLOCK)
3690 /* Lock the other inode and make sure nobody got in and leased it. */
3691 inode_lock_nested(dest, I_MUTEX_NONDIR2);
3692 error = break_layout(dest, false);
3696 if (error == -EWOULDBLOCK)
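/*
 * A condensed model of the retry protocol above (illustrative only: it
 * ignores the src == dest case and the unconditional blocking layout breaks
 * performed before any locks are taken). The key point is that the blocking
 * break_layout() calls always run with no inode locks held:
 */
#if 0	/* illustrative sketch only, not built */
static int
model_lock_two_break_layouts(struct inode *a, struct inode *b)
{
	int error;

	if (a > b)			/* lowest pointer value locks first */
		swap(a, b);
again:
	inode_lock(a);
	error = break_layout(a, false);		/* non-blocking probe */
	if (error)
		goto out_unlock_a;
	inode_lock_nested(b, I_MUTEX_NONDIR2);
	error = break_layout(b, false);
	if (!error)
		return 0;			/* both locked, no leases */
	inode_unlock(b);
out_unlock_a:
	inode_unlock(a);
	if (error == -EWOULDBLOCK) {
		/* drop everything, wait for the leases, and retry */
		error = break_layout(a, true) ?: break_layout(b, true);
		if (!error)
			goto again;
	}
	return error;
}
#endif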
3705 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or mmap activity.
3710 struct xfs_inode *ip1,
3711 struct xfs_inode *ip2)
3715 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3718 if (ip1 == ip2)
3719 xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3720 else
3721 xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3722 ip2, XFS_MMAPLOCK_EXCL);
3726 /* Unlock both inodes to allow IO and mmap activity. */
3728 xfs_iunlock2_io_mmap(
3729 struct xfs_inode *ip1,
3730 struct xfs_inode *ip2)
3732 bool same_inode = (ip1 == ip2);
3734 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3735 if (!same_inode)
3736 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3737 inode_unlock(VFS_I(ip2));
3738 if (!same_inode)
3739 inode_unlock(VFS_I(ip1));