// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 */
#include <linux/iversion.h>
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * allocate blocks for them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
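
/*
 * A worked example of the above (illustrative values only): an inode
 * with di_extsize = 16 blocks and di_cowextsize = 64 blocks gets a CoW
 * hint of 64 blocks; with both hints zero, the helper falls back to
 * XFS_DEFAULT_COWEXTSZ_HINT:
 *
 *	xfs_extlen_t	hint = xfs_get_cowextsz_hint(ip);
 */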
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * reading in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
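
/*
 * A minimal usage sketch (hypothetical caller; the extent walk itself
 * is elided):
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the data fork extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */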
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp &&
	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO.  These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO.  Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock.  These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
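
/*
 * A sketch of that pattern (hypothetical truncate-style caller; the
 * page cache invalidation itself is elided):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate page cache, manipulate extents ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */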
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	locked.  See the comment for xfs_ilock() for a list
 *	of valid values for this parameter.
 */
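
/*
 * A minimal sketch of the expected calling pattern (hypothetical
 * caller that backs off and blocks when the trylock fails):
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... drop other locks, then ...
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 */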
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	unlocked.  See the comment for xfs_ilock() for a list
 *	of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif
/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set.  And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set.  Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value.  This can be called for any type of inode lock combination, including
 * parent locking.  Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
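
/*
 * For example, the third inode of a set locked with XFS_ILOCK_EXCL
 * (subclass 2) is annotated roughly as:
 *
 *	xfs_ilock(ips[2], xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
 *
 * so lockdep sees a distinct subclass for each lock in the set.
 */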
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate).  This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock.  If we lock more than one type at a
 * time, lockdep will report false positives saying we have violated locking
 * orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * ASSERTs.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
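
/*
 * A sketch of a typical call (hypothetical caller; inodes already
 * sorted by ->i_ino, duplicates allowed):
 *
 *	struct xfs_inode	*ips[4] = { dp1, dp2, ip1, ip2 };
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */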
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time.  If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock.  If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
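
/*
 * A sketch of a typical call (hypothetical caller locking a source and
 * a target inode for an extent-level operation):
 *
 *	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
 */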
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wq_entry);
}
STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}
/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
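
/*
 * The resulting caller contract, as a rough sketch (see xfs_dir_ialloc()
 * below for the real loop; error handling elided):
 *
 *	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
 *			  &ialloc_context, &ip);
 *	if (ialloc_context) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		code = xfs_trans_roll(&tp);
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
 *				  &ialloc_context, &ip);
 *	}
 */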
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	struct inode	*inode;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec64 tv;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	inode = VFS_I(ip);

	inode->i_mode = mode;
	set_nlink(inode, nlink);
	inode->i_uid = current_fsuid();
	inode->i_rdev = rdev;
	ip->i_d.di_projid = prid;

	if (pip && XFS_INHERIT_GID(pip)) {
		inode->i_gid = VFS_I(pip)->i_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
		inode->i_mode &= ~S_ISGID;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = 0;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime = tv;
	}
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
					di_flags |= XFS_DIFLAG_PROJINHERIT;
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;

			ip->i_d.di_flags |= di_flags;
		}
		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy.  This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_tmpfile.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory in which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,		/* project id */
	xfs_inode_t	**ipp)		/* pointer to inode; it will be
					   locked. */
{
	xfs_buf_t	*ialloc_context = NULL;
	xfs_inode_t	*ip;
	int		code;
	void		*dqinfo;
	uint		tflags;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
			  &ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);
	}

	*ipp = ip;
	*tpp = tp;
	return 0;
}
/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}
/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
int
xfs_create(
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	struct xfs_inode	**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
					&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error)
		goto out_release_inode;

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}
int
xfs_link(
	struct xfs_inode	*tdp,
	struct xfs_inode	*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (resblks == 0) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}
/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
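
/*
 * Sketch of the caller contract described above (hypothetical caller;
 * error handling elided):
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */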
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (first_unmap_block >= XFS_MAX_FILEOFF) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				      flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;

		error = xfs_trans_roll_inode(&tp, ip);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {
		/*
		 * Check if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;
		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_lock
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 */
		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
			error = xfs_free_eofblocks(ip);
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			if (error)
				return error;
		}

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}
/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes.  See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}
/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to do.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip))
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		return;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip);
	if (error)
		return;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	ASSERT(ip->i_d.di_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}
/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * What if we modelled the unlinked list as a collection of records capturing
 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
 * have a fast way to look up unlinked list predecessors, which avoids the
 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
 * rhashtable.
 *
 * Because this is a backref cache, we ignore operational failures since the
 * iunlink code can fall back to the slow bucket walk.  The only errors that
 * should bubble out are for obviously incorrect situations.
 *
 * All users of the backref cache MUST hold the AGI buffer lock to serialize
 * access or have otherwise provided for concurrency control.
 */
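
/*
 * For example, an unlinked bucket chain of
 *
 *	bucket -> 37 -> 12 -> 99 -> NULLAGINO
 *
 * is mirrored by backref records keyed on the right-hand side:
 *
 *	37.next_unlinked = 12
 *	12.next_unlinked = 99
 *
 * so removing inode 99 looks up its predecessor 12 directly instead of
 * walking the chain from 37.  (Illustrative inode numbers only.)
 */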
/* Capture a "X.next_unlinked = Y" relationship. */
struct xfs_iunlink {
	struct rhash_head	iu_rhash_head;
	xfs_agino_t		iu_agino;		/* X */
	xfs_agino_t		iu_next_unlinked;	/* Y */
};

/* Unlinked list predecessor lookup hashtable construction */
static int
xfs_iunlink_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const xfs_agino_t	*key = arg->key;
	const struct xfs_iunlink *iu = obj;

	if (iu->iu_next_unlinked != *key)
		return 1;
	return 0;
}

static const struct rhashtable_params xfs_iunlink_hash_params = {
	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
	.key_len		= sizeof(xfs_agino_t),
	.key_offset		= offsetof(struct xfs_iunlink,
					   iu_next_unlinked),
	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
};
/*
 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
 * relation is found.
 */
static xfs_agino_t
xfs_iunlink_lookup_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_iunlink	*iu;

	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	return iu ? iu->iu_agino : NULLAGINO;
}
/*
 * Take ownership of an iunlink cache entry and insert it into the hash table.
 * If successful, the entry will be owned by the cache; if not, it is freed.
 * Either way, the caller does not own @iu after this call.
 */
static int
xfs_iunlink_insert_backref(
	struct xfs_perag	*pag,
	struct xfs_iunlink	*iu)
{
	int			error;

	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	/*
	 * Fail loudly if there already was an entry because that's a sign of
	 * corruption of in-memory data.  Also fail loudly if we see an error
	 * code we didn't anticipate from the rhashtable code.  Currently we
	 * only anticipate ENOMEM.
	 */
	if (error) {
		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
		kmem_free(iu);
	}

	/*
	 * Absorb any runtime errors that aren't a result of corruption because
	 * this is a cache and we can always fall back to bucket list scanning.
	 */
	if (error != 0 && error != -EEXIST)
		return error;
	return 0;
}
/* Remember that @prev_agino.next_unlinked = @this_agino. */
static int
xfs_iunlink_add_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		prev_agino,
	xfs_agino_t		this_agino)
{
	struct xfs_iunlink	*iu;

	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
		return 0;

	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
	iu->iu_agino = prev_agino;
	iu->iu_next_unlinked = this_agino;

	return xfs_iunlink_insert_backref(pag, iu);
}
/*
 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
 * wasn't any such entry then we don't bother.
 */
static int
xfs_iunlink_change_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	xfs_agino_t		next_unlinked)
{
	struct xfs_iunlink	*iu;
	int			error;

	/* Look up the old entry; if there wasn't one then exit. */
	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	if (!iu)
		return 0;

	/*
	 * Remove the entry.  This shouldn't ever return an error, but if we
	 * couldn't remove the old entry we don't want to add it again to the
	 * hash table, and if the entry disappeared on us then someone's
	 * violated the locking rules and we need to fail loudly.  Either way
	 * we cannot remove the inode because internal state is or would have
	 * been corrupt.
	 */
	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	if (error)
		return error;

	/* If there is no new next entry just free our item and return. */
	if (next_unlinked == NULLAGINO) {
		kmem_free(iu);
		return 0;
	}

	/* Update the entry and re-add it to the hash table. */
	iu->iu_next_unlinked = next_unlinked;
	return xfs_iunlink_insert_backref(pag, iu);
}
/* Set up the in-core predecessor structures. */
int
xfs_iunlink_init(
	struct xfs_perag	*pag)
{
	return rhashtable_init(&pag->pagi_unlinked_hash,
			&xfs_iunlink_hash_params);
}
/* Free the in-core predecessor structures. */
static void
xfs_iunlink_free_item(
	void			*ptr,
	void			*arg)
{
	struct xfs_iunlink	*iu = ptr;
	bool			*freed_anything = arg;

	*freed_anything = true;
	kmem_free(iu);
}

void
xfs_iunlink_destroy(
	struct xfs_perag	*pag)
{
	bool			freed_anything = false;

	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
			xfs_iunlink_free_item, &freed_anything);

	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
}
/*
 * Point the AGI unlinked bucket at an inode and log the results.  The caller
 * is responsible for validating the old value.
 */
STATIC int
xfs_iunlink_update_bucket(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		*agibp,
	unsigned int		bucket_index,
	xfs_agino_t		new_agino)
{
	struct xfs_agi		*agi = agibp->b_addr;
	xfs_agino_t		old_value;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));

	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
			old_value, new_agino);

	/*
	 * We should never find the head of the list already set to the value
	 * passed in because either we're adding or removing ourselves from the
	 * head of the list.
	 */
	if (old_value == new_agino) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
	offset = offsetof(struct xfs_agi, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
}
/* Set an on-disk inode's next_unlinked pointer. */
STATIC void
xfs_iunlink_update_dinode(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_buf		*ibp,
	struct xfs_dinode	*dip,
	struct xfs_imap		*imap,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

	trace_xfs_iunlink_update_dinode(mp, agno, agino,
			be32_to_cpu(dip->di_next_unlinked), next_agino);

	dip->di_next_unlinked = cpu_to_be32(next_agino);
	offset = imap->im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	/* need to recalc the inode CRC if appropriate */
	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
	xfs_inobp_check(mp, ibp);
}
2172 /* Set an in-core inode's unlinked pointer and return the old value. */
2174 xfs_iunlink_update_inode(
2175 struct xfs_trans *tp,
2176 struct xfs_inode *ip,
2177 xfs_agnumber_t agno,
2178 xfs_agino_t next_agino,
2179 xfs_agino_t *old_next_agino)
2181 struct xfs_mount *mp = tp->t_mountp;
2182 struct xfs_dinode *dip;
2183 struct xfs_buf *ibp;
2184 xfs_agino_t old_value;
2187 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2189 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
2193 /* Make sure the old pointer isn't garbage. */
2194 old_value = be32_to_cpu(dip->di_next_unlinked);
2195 if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2196 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2197 sizeof(*dip), __this_address);
2198 error = -EFSCORRUPTED;
2203 * Since we're updating a linked list, we should never find that the
2204 * current pointer is the same as the new value, unless we're
2205 * terminating the list.
2207 *old_next_agino = old_value;
2208 if (old_value == next_agino) {
2209 if (next_agino != NULLAGINO) {
2210 xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2211 dip, sizeof(*dip), __this_address);
2212 error = -EFSCORRUPTED;
2213 }
2214 goto out;
2217 /* Ok, update the new pointer. */
2218 xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2219 ibp, dip, &ip->i_imap, next_agino);
2221 out:
2222 xfs_trans_brelse(tp, ibp);
2223 return error;
2227 * This is called when the inode's link count has gone to 0 or we are creating
2228 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
2230 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2231 * list when the inode is freed.
2233 STATIC int
2234 xfs_iunlink(
2235 struct xfs_trans *tp,
2236 struct xfs_inode *ip)
2238 struct xfs_mount *mp = tp->t_mountp;
2239 struct xfs_agi *agi;
2240 struct xfs_buf *agibp;
2241 xfs_agino_t next_agino;
2242 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2243 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2244 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2247 ASSERT(VFS_I(ip)->i_nlink == 0);
2248 ASSERT(VFS_I(ip)->i_mode != 0);
2249 trace_xfs_iunlink(ip);
2251 /* Get the agi buffer first. It ensures lock ordering on the list. */
2252 error = xfs_read_agi(mp, tp, agno, &agibp);
2253 if (error)
2254 return error;
2255 agi = agibp->b_addr;
2258 * Get the index into the agi hash table for the list this inode will
2259 * go on. Make sure the pointer isn't garbage and that this inode
2260 * isn't already on the list.
2262 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2263 if (next_agino == agino ||
2264 !xfs_verify_agino_or_null(mp, agno, next_agino)) {
2265 xfs_buf_mark_corrupt(agibp);
2266 return -EFSCORRUPTED;
2269 if (next_agino != NULLAGINO) {
2270 struct xfs_perag *pag;
2271 xfs_agino_t old_agino;
2274 * There is already another inode in the bucket, so point this
2275 * inode to the current head of the list.
2277 error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2278 &old_agino);
2279 if (error)
2280 return error;
2281 ASSERT(old_agino == NULLAGINO);
2284 * agino has been unlinked, add a backref from the next inode
2285 * back to agino.
2287 pag = xfs_perag_get(mp, agno);
2288 error = xfs_iunlink_add_backref(pag, agino, next_agino);
2289 xfs_perag_put(pag);
2290 if (error)
2291 return error;
2294 /* Point the head of the list to point to this inode. */
2295 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
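/*
 * Sketch of the push-to-head ordering performed above, for a bucket
 * that already holds inode B when inode A is unlinked (names
 * hypothetical):
 *
 *	before:	AGI[bucket] -> B -> ... -> NULLAGINO
 *	step 1:	A->di_next_unlinked = B		(xfs_iunlink_update_inode)
 *	step 2:	cache backref "B's predecessor is A"
 *	step 3:	AGI[bucket] = A			(xfs_iunlink_update_bucket)
 *	after:	AGI[bucket] -> A -> B -> ... -> NULLAGINO
 */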
2298 /* Return the imap, dinode pointer, and buffer for an inode. */
2299 STATIC int
2300 xfs_iunlink_map_ino(
2301 struct xfs_trans *tp,
2302 xfs_agnumber_t agno,
2303 xfs_agino_t agino,
2304 struct xfs_imap *imap,
2305 struct xfs_dinode **dipp,
2306 struct xfs_buf **bpp)
2308 struct xfs_mount *mp = tp->t_mountp;
2312 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2313 if (error) {
2314 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2315 __func__, error);
2316 return error;
2319 error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
2320 if (error) {
2321 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2322 __func__, error);
2323 return error;
2330 * Walk the unlinked chain from @head_agino until we find the inode that
2331 * points to @target_agino. Return the inode number, map, dinode pointer,
2332 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2334 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2335 * @agino, @imap, @dipp, and @bpp are all output parameters.
2337 * Do not call this function if @target_agino is the head of the list.
2339 STATIC int
2340 xfs_iunlink_map_prev(
2341 struct xfs_trans *tp,
2342 xfs_agnumber_t agno,
2343 xfs_agino_t head_agino,
2344 xfs_agino_t target_agino,
2345 xfs_agino_t *agino,
2346 struct xfs_imap *imap,
2347 struct xfs_dinode **dipp,
2348 struct xfs_buf **bpp,
2349 struct xfs_perag *pag)
2351 struct xfs_mount *mp = tp->t_mountp;
2352 xfs_agino_t next_agino;
2355 ASSERT(head_agino != target_agino);
2358 /* See if our backref cache can find it faster. */
2359 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2360 if (*agino != NULLAGINO) {
2361 error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2365 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2366 return 0;
2369 * If we get here the cache contents were corrupt, so drop the
2370 * buffer and fall back to walking the bucket list.
2372 xfs_trans_brelse(tp, *bpp);
2377 trace_xfs_iunlink_map_prev_fallback(mp, agno);
2379 /* Otherwise, walk the entire bucket until we find it. */
2380 next_agino = head_agino;
2381 while (next_agino != target_agino) {
2382 xfs_agino_t unlinked_agino;
2385 xfs_trans_brelse(tp, *bpp);
2387 *agino = next_agino;
2388 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2389 bpp);
2390 if (error)
2391 return error;
2393 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2395 * Make sure this pointer is valid and isn't an obvious
2396 * infinite loop.
2398 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2399 next_agino == unlinked_agino) {
2400 XFS_CORRUPTION_ERROR(__func__,
2401 XFS_ERRLEVEL_LOW, mp,
2402 *dipp, sizeof(**dipp));
2403 error = -EFSCORRUPTED;
2406 next_agino = unlinked_agino;
2413 * Pull the on-disk inode from the AGI unlinked list.
2415 STATIC int
2416 xfs_iunlink_remove(
2417 struct xfs_trans *tp,
2418 struct xfs_inode *ip)
2420 struct xfs_mount *mp = tp->t_mountp;
2421 struct xfs_agi *agi;
2422 struct xfs_buf *agibp;
2423 struct xfs_buf *last_ibp;
2424 struct xfs_dinode *last_dip = NULL;
2425 struct xfs_perag *pag = NULL;
2426 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2427 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2428 xfs_agino_t next_agino;
2429 xfs_agino_t head_agino;
2430 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2433 trace_xfs_iunlink_remove(ip);
2435 /* Get the agi buffer first. It ensures lock ordering on the list. */
2436 error = xfs_read_agi(mp, tp, agno, &agibp);
2437 if (error)
2438 return error;
2439 agi = agibp->b_addr;
2442 * Get the index into the agi hash table for the list this inode will
2443 * go on. Make sure the head pointer isn't garbage.
2445 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2446 if (!xfs_verify_agino(mp, agno, head_agino)) {
2447 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2448 agi, sizeof(*agi));
2449 return -EFSCORRUPTED;
2453 * Set our inode's next_unlinked pointer to NULL and then return
2454 * the old pointer value so that we can update whatever was previous
2455 * to us in the list to point to whatever was next in the list.
2457 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2458 if (error)
2459 return error;
2462 * If there was a backref pointing from the next inode back to this
2463 * one, remove it because we've removed this inode from the list.
2465 * Later, if this inode was in the middle of the list we'll update
2466 * this inode's backref to point from the next inode.
2468 if (next_agino != NULLAGINO) {
2469 pag = xfs_perag_get(mp, agno);
2470 error = xfs_iunlink_change_backref(pag, next_agino,
2471 NULLAGINO);
2472 if (error)
2473 goto out;
2476 if (head_agino == agino) {
2477 /* Point the head of the list to the next unlinked inode. */
2478 error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2479 next_agino);
2483 struct xfs_imap imap;
2484 xfs_agino_t prev_agino;
2487 pag = xfs_perag_get(mp, agno);
2489 /* We need to search the list for the inode being freed. */
2490 error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2491 &prev_agino, &imap, &last_dip, &last_ibp,
2492 pag);
2496 /* Point the previous inode on the list to the next inode. */
2497 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2498 last_dip, &imap, next_agino);
2501 * Now we deal with the backref for this inode. If this inode
2502 * pointed at a real inode, change the backref that pointed to
2503 * us to point to our old next. If this inode was the end of
2504 * the list, delete the backref that pointed to us. Note that
2505 * change_backref takes care of deleting the backref if
2506 * next_agino is NULLAGINO.
2508 error = xfs_iunlink_change_backref(pag, agino, next_agino);
2509 if (error)
2510 goto out;
2513 out:
2514 if (pag)
2515 xfs_perag_put(pag);
2516 return error;
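/*
 * Sketch of the removal handled above for a bucket ... P -> X -> N ...,
 * where X is the inode being freed (names hypothetical):
 *
 *	step 1:	X->di_next_unlinked = NULLAGINO, old value N remembered
 *	step 2:	stale backref "N's predecessor is X" dropped
 *	step 3:	P located via the backref cache (or a bucket walk) and
 *		P->di_next_unlinked set to N
 *	step 4:	backref cache updated so N's recorded predecessor is P
 *	after:	... P -> N ..., with X off the list entirely
 */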
2520 * Look up the inode number specified and mark it stale if it is found. If it is
2521 * dirty, return the inode so it can be attached to the cluster buffer so it can
2522 * be processed appropriately when the cluster free transaction completes.
2524 static struct xfs_inode *
2525 xfs_ifree_get_one_inode(
2526 struct xfs_perag *pag,
2527 struct xfs_inode *free_ip,
2528 xfs_ino_t inum)
2530 struct xfs_mount *mp = pag->pag_mount;
2531 struct xfs_inode *ip;
2534 rcu_read_lock();
2535 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2536 if (!ip) {
2537 /* Inode not in memory, nothing to do */
2539 goto out_rcu_unlock;
2542 * because this is an RCU protected lookup, we could find a recently
2543 * freed or even reallocated inode during the lookup. We need to check
2544 * under the i_flags_lock for a valid inode here. Skip it if it is not
2545 * valid, the wrong inode or stale.
2547 spin_lock(&ip->i_flags_lock);
2548 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) {
2549 spin_unlock(&ip->i_flags_lock);
2550 goto out_rcu_unlock;
2552 spin_unlock(&ip->i_flags_lock);
2555 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2556 * other inodes that we did not find in the list attached to the buffer
2557 * and are not already marked stale. If we can't lock it, back off and
2558 * retry.
2560 if (ip != free_ip) {
2561 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2568 * Check the inode number again in case we're racing with
2569 * freeing in xfs_reclaim_inode(). See the comments in that
2570 * function for more information as to why the initial check is
2573 if (ip->i_ino != inum) {
2574 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2575 goto out_rcu_unlock;
2581 xfs_iflags_set(ip, XFS_ISTALE);
2584 * We don't need to attach clean inodes or those only with unlogged
2585 * changes (which we throw away, anyway).
2587 if (!ip->i_itemp || xfs_inode_clean(ip)) {
2588 ASSERT(ip != free_ip);
2590 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2602 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2603 * inodes that are in memory - they all must be marked stale and attached to
2604 * the cluster buffer.
2606 STATIC int
2607 xfs_ifree_cluster(
2608 xfs_inode_t *free_ip,
2609 xfs_trans_t *tp,
2610 struct xfs_icluster *xic)
2612 xfs_mount_t *mp = free_ip->i_mount;
2619 struct xfs_inode_log_item *iip;
2620 struct xfs_log_item *lip;
2621 struct xfs_perag *pag;
2622 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2626 inum = xic->first_ino;
2627 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
2628 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2630 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2632 * The allocation bitmap tells us which inodes of the chunk were
2633 * physically allocated. Skip the cluster if an inode falls into
2634 * a sparse region.
2636 ioffset = inum - xic->first_ino;
2637 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2638 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2642 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2643 XFS_INO_TO_AGBNO(mp, inum));
2646 * We obtain and lock the backing buffer first in the process
2647 * here, as we have to ensure that any dirty inode that we
2648 * can't get the flush lock on is attached to the buffer.
2649 * If we scan the in-memory inodes first, then buffer IO can
2650 * complete before we get a lock on it, and hence we may fail
2651 * to mark all the active inodes on the buffer stale.
2653 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2654 mp->m_bsize * igeo->blocks_per_cluster,
2655 XBF_UNMAPPED, &bp);
2657 if (error)
2658 return error;
2662 * This buffer may not have been correctly initialised as we
2663 * didn't read it from disk. That's not important because we are
2664 * only using it to mark the buffer as stale in the log, and to
2665 * attach stale cached inodes on it. That means it will never be
2666 * dispatched for IO. If it is, we want to know about it, and we
2667 * want it to fail. We can achieve this by adding a write
2668 * verifier to the buffer.
2670 bp->b_ops = &xfs_inode_buf_ops;
2673 * Walk the inodes already attached to the buffer and mark them
2674 * stale. These will all have the flush locks held, so an
2675 * in-memory inode walk can't lock them. By marking them all
2676 * stale first, we will not attempt to lock them in the loop
2677 * below as the XFS_ISTALE flag will be set.
2679 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
2680 if (lip->li_type == XFS_LI_INODE) {
2681 iip = (struct xfs_inode_log_item *)lip;
2682 ASSERT(iip->ili_logged == 1);
2683 lip->li_cb = xfs_istale_done;
2684 xfs_trans_ail_copy_lsn(mp->m_ail,
2685 &iip->ili_flush_lsn,
2686 &iip->ili_item.li_lsn);
2687 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2693 * For each inode in memory attempt to add it to the inode
2694 * buffer and set it up for being staled on buffer IO
2695 * completion. This is safe as we've locked out tail pushing
2696 * and flushing by locking the buffer.
2698 * We have already marked every inode that was part of a
2699 * transaction stale above, which means there is no point in
2700 * even trying to lock them.
2702 for (i = 0; i < igeo->inodes_per_cluster; i++) {
2703 ip = xfs_ifree_get_one_inode(pag, free_ip, inum + i);
2704 if (!ip)
2705 continue;
2707 iip = ip->i_itemp;
2708 iip->ili_last_fields = iip->ili_fields;
2709 iip->ili_fields = 0;
2710 iip->ili_fsync_fields = 0;
2711 iip->ili_logged = 1;
2712 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2713 &iip->ili_item.li_lsn);
2715 xfs_buf_attach_iodone(bp, xfs_istale_done,
2716 &iip->ili_item);
2718 if (ip != free_ip)
2719 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2722 xfs_trans_stale_inode_buf(tp, bp);
2723 xfs_trans_binval(tp, bp);
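/*
 * Worked example of the loop bounds above under a hypothetical
 * geometry: with ialloc_blks = 8, blocks_per_cluster = 4 and 8 inodes
 * per block, nbufs = 2 and inodes_per_cluster = 32, so a 64-inode chunk
 * is staled through two cluster buffers with inum advancing by 32 on
 * each pass.
 */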
2731 * This is called to return an inode to the inode free list.
2732 * The inode should already be truncated to 0 length and have
2733 * no pages associated with it. This routine also assumes that
2734 * the inode is already a part of the transaction.
2736 * The on-disk copy of the inode will have been added to the list
2737 * of unlinked inodes in the AGI. We need to remove the inode from
2738 * that list atomically with respect to freeing it here.
2740 int
2741 xfs_ifree(
2742 struct xfs_trans *tp,
2743 struct xfs_inode *ip)
2746 struct xfs_icluster xic = { 0 };
2748 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2749 ASSERT(VFS_I(ip)->i_nlink == 0);
2750 ASSERT(ip->i_df.if_nextents == 0);
2751 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2752 ASSERT(ip->i_d.di_nblocks == 0);
2755 * Pull the on-disk inode from the AGI unlinked list.
2757 error = xfs_iunlink_remove(tp, ip);
2758 if (error)
2759 return error;
2761 error = xfs_difree(tp, ip->i_ino, &xic);
2762 if (error)
2763 return error;
2766 * Free any local-format data sitting around before we reset the
2767 * data fork to extents format. Note that the attr fork data has
2768 * already been freed by xfs_attr_inactive.
2770 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2771 kmem_free(ip->i_df.if_u1.if_data);
2772 ip->i_df.if_u1.if_data = NULL;
2773 ip->i_df.if_bytes = 0;
2776 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2777 ip->i_d.di_flags = 0;
2778 ip->i_d.di_flags2 = 0;
2779 ip->i_d.di_dmevmask = 0;
2780 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2781 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2783 /* Don't attempt to replay owner changes for a deleted inode */
2784 ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2787 * Bump the generation count so no one will be confused
2788 * by reincarnations of this inode.
2790 VFS_I(ip)->i_generation++;
2791 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2793 if (xic.deleted)
2794 error = xfs_ifree_cluster(ip, tp, &xic);
2796 return error;
2800 * This is called to unpin an inode. The caller must have the inode locked
2801 * in at least shared mode so that the buffer cannot be subsequently pinned
2802 * once someone is waiting for it to be unpinned.
2804 static void
2805 xfs_iunpin(
2806 struct xfs_inode *ip)
2808 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2810 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2812 /* Give the log a push to start the unpinning I/O */
2813 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
2817 static void
2818 __xfs_iunpin_wait(
2819 struct xfs_inode *ip)
2821 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2822 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2826 do {
2827 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2828 if (xfs_ipincount(ip))
2829 io_schedule();
2830 } while (xfs_ipincount(ip));
2831 finish_wait(wq, &wait.wq_entry);
2834 void
2835 xfs_iunpin_wait(
2836 struct xfs_inode *ip)
2838 if (xfs_ipincount(ip))
2839 __xfs_iunpin_wait(ip);
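/*
 * In short: xfs_iunpin() only pushes the log up to ili_last_lsn; log
 * I/O completion drops the pin count and wakes the bit waitqueue, so
 * __xfs_iunpin_wait() re-checks xfs_ipincount() and returns once the
 * inode is fully unpinned.
 */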
2843 * Removing an inode from the namespace involves removing the directory entry
2844 * and dropping the link count on the inode. Removing the directory entry can
2845 * result in locking an AGF (directory blocks were freed) and removing a link
2846 * count can result in placing the inode on an unlinked list which results in
2849 * The big problem here is that we have an ordering constraint on AGF and AGI
2850 * locking - inode allocation locks the AGI, then can allocate a new extent for
2851 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2852 * removes the inode from the unlinked list, requiring that we lock the AGI
2853 * first, and then freeing the inode can result in an inode chunk being freed
2854 * and hence freeing disk space requiring that we lock an AGF.
2856 * Hence the ordering that is imposed by other parts of the code is AGI before
2857 * AGF. This means we cannot remove the directory entry before we drop the inode
2858 * reference count and put it on the unlinked list as this results in a lock
2859 * order of AGF then AGI, and this can deadlock against inode allocation and
2860 * freeing. Therefore we must drop the link counts before we remove the
2861 * directory entry.
2863 * This is still safe from a transactional point of view - it is not until we
2864 * get to xfs_defer_finish() that we have the possibility of multiple
2865 * transactions in this operation. Hence as long as we remove the directory
2866 * entry and drop the link count in the first transaction of the remove
2867 * operation, there are no transactional constraints on the ordering here.
2869 int
2870 xfs_remove(
2871 xfs_inode_t *dp,
2872 struct xfs_name *name,
2873 xfs_inode_t *ip)
2875 xfs_mount_t *mp = dp->i_mount;
2876 xfs_trans_t *tp = NULL;
2877 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2881 trace_xfs_remove(dp, name);
2883 if (XFS_FORCED_SHUTDOWN(mp))
2884 return -EIO;
2886 error = xfs_qm_dqattach(dp);
2887 if (error)
2888 goto std_return;
2890 error = xfs_qm_dqattach(ip);
2891 if (error)
2892 goto std_return;
2895 * We try to get the real space reservation first,
2896 * allowing for directory btree deletion(s) implying
2897 * possible bmap insert(s). If we can't get the space
2898 * reservation then we use 0 instead, and avoid the bmap
2899 * btree insert(s) in the directory code by, if the bmap
2900 * insert tries to happen, instead trimming the LAST
2901 * block from the directory.
2903 resblks = XFS_REMOVE_SPACE_RES(mp);
2904 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2905 if (error == -ENOSPC) {
2906 resblks = 0;
2907 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2908 &tp);
2910 if (error) {
2911 ASSERT(error != -ENOSPC);
2912 goto std_return;
2915 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2917 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2918 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2921 * If we're removing a directory perform some additional validation.
2923 if (is_dir) {
2924 ASSERT(VFS_I(ip)->i_nlink >= 2);
2925 if (VFS_I(ip)->i_nlink != 2) {
2926 error = -ENOTEMPTY;
2927 goto out_trans_cancel;
2929 if (!xfs_dir_isempty(ip)) {
2930 error = -ENOTEMPTY;
2931 goto out_trans_cancel;
2934 /* Drop the link from ip's "..". */
2935 error = xfs_droplink(tp, dp);
2936 if (error)
2937 goto out_trans_cancel;
2939 /* Drop the "." link from ip to self. */
2940 error = xfs_droplink(tp, ip);
2941 if (error)
2942 goto out_trans_cancel;
2945 * When removing a non-directory we need to log the parent
2946 * inode here. For a directory this is done implicitly
2947 * by the xfs_droplink call for the ".." entry.
2949 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2951 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2953 /* Drop the link from dp to ip. */
2954 error = xfs_droplink(tp, ip);
2955 if (error)
2956 goto out_trans_cancel;
2958 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2959 if (error) {
2960 ASSERT(error != -ENOENT);
2961 goto out_trans_cancel;
2965 * If this is a synchronous mount, make sure that the
2966 * remove transaction goes to disk before returning to
2969 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2970 xfs_trans_set_sync(tp);
2972 error = xfs_trans_commit(tp);
2973 if (error)
2974 goto std_return;
2976 if (is_dir && xfs_inode_is_filestream(ip))
2977 xfs_filestream_deassociate(ip);
2979 return 0;
2981 out_trans_cancel:
2982 xfs_trans_cancel(tp);
2983 std_return:
2984 return error;
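/*
 * The reservation fallback used above is a recurring pattern in this
 * file; a condensed, hypothetical sketch of just that step (error
 * handling beyond ENOSPC elided):
 */
static inline int
xfs_remove_trans_alloc_sketch(
	struct xfs_mount	*mp,
	uint			*resblks,
	struct xfs_trans	**tpp)
{
	int			error;

	/* Ask for the full remove reservation first... */
	*resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, *resblks, 0, 0,
			tpp);
	if (error == -ENOSPC) {
		/* ...and retry without a block reservation if space is tight. */
		*resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
				tpp);
	}
	return error;
}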
2988 * Enter all inodes for a rename transaction into a sorted array.
2990 #define __XFS_SORT_INODES 5
2991 static void
2992 xfs_sort_for_rename(
2993 struct xfs_inode *dp1, /* in: old (source) directory inode */
2994 struct xfs_inode *dp2, /* in: new (target) directory inode */
2995 struct xfs_inode *ip1, /* in: inode of old entry */
2996 struct xfs_inode *ip2, /* in: inode of new entry */
2997 struct xfs_inode *wip, /* in: whiteout inode */
2998 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2999 int *num_inodes) /* in/out: inodes in array */
3003 ASSERT(*num_inodes == __XFS_SORT_INODES);
3004 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
3007 * i_tab contains a list of pointers to inodes. We initialize
3008 * the table here & we'll sort it. We will then use it to
3009 * order the acquisition of the inode locks.
3011 * Note that the table may contain duplicates. e.g., dp1 == dp2.
3024 * Sort the elements via bubble sort. (Remember, there are at
3025 * most 5 elements to sort, so this is adequate.)
3027 for (i = 0; i < *num_inodes; i++) {
3028 for (j = 1; j < *num_inodes; j++) {
3029 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
3030 struct xfs_inode *temp = i_tab[j];
3031 i_tab[j] = i_tab[j-1];
3032 i_tab[j-1] = temp;
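/*
 * Usage sketch (hypothetical caller) mirroring xfs_rename() below:
 * after sorting, i_tab is ordered by ascending inode number, so every
 * caller acquires the ILOCKs in the same global order (wip may be
 * NULL):
 */
static inline void
xfs_rename_lock_sketch(
	struct xfs_inode	*dp1,
	struct xfs_inode	*dp2,
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2,
	struct xfs_inode	*wip)
{
	struct xfs_inode	*inodes[__XFS_SORT_INODES];
	int			num_inodes = __XFS_SORT_INODES;

	xfs_sort_for_rename(dp1, dp2, ip1, ip2, wip, inodes, &num_inodes);
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
}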
3038 STATIC int
3039 xfs_finish_rename(
3040 struct xfs_trans *tp)
3043 * If this is a synchronous mount, make sure that the rename transaction
3044 * goes to disk before returning to the user.
3046 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
3047 xfs_trans_set_sync(tp);
3049 return xfs_trans_commit(tp);
3053 * xfs_cross_rename()
3055 * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
3057 STATIC int
3058 xfs_cross_rename(
3059 struct xfs_trans *tp,
3060 struct xfs_inode *dp1,
3061 struct xfs_name *name1,
3062 struct xfs_inode *ip1,
3063 struct xfs_inode *dp2,
3064 struct xfs_name *name2,
3065 struct xfs_inode *ip2,
3066 int spaceres)
3073 /* Swap inode number for dirent in first parent */
3074 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3075 if (error)
3076 goto out_trans_abort;
3078 /* Swap inode number for dirent in second parent */
3079 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3080 if (error)
3081 goto out_trans_abort;
3084 * If we're renaming one or more directories across different parents,
3085 * update the respective ".." entries (and link counts) to match the new
3086 * parents.
3088 if (dp1 != dp2) {
3089 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3091 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3092 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3093 dp1->i_ino, spaceres);
3094 if (error)
3095 goto out_trans_abort;
3097 /* transfer ip2 ".." reference to dp1 */
3098 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3099 error = xfs_droplink(tp, dp2);
3100 if (error)
3101 goto out_trans_abort;
3102 xfs_bumplink(tp, dp1);
3106 * Although ip1 isn't changed here, userspace needs
3107 * to be warned about the change, so that applications
3108 * relying on it (like backup ones), will properly
3109 * notify the change.
3111 ip1_flags |= XFS_ICHGTIME_CHG;
3112 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3115 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3116 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3117 dp2->i_ino, spaceres);
3118 if (error)
3119 goto out_trans_abort;
3121 /* transfer ip1 ".." reference to dp2 */
3122 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3123 error = xfs_droplink(tp, dp1);
3124 if (error)
3125 goto out_trans_abort;
3126 xfs_bumplink(tp, dp2);
3130 * Although ip2 isn't changed here, userspace needs
3131 * to be warned about the change, so that applications
3132 * relying on it (like backup ones), will properly
3133 * notify the change.
3135 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3136 ip2_flags |= XFS_ICHGTIME_CHG;
3140 if (ip1_flags) {
3141 xfs_trans_ichgtime(tp, ip1, ip1_flags);
3142 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3144 if (ip2_flags) {
3145 xfs_trans_ichgtime(tp, ip2, ip2_flags);
3146 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3148 if (dp2_flags) {
3149 xfs_trans_ichgtime(tp, dp2, dp2_flags);
3150 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3152 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3153 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3154 return xfs_finish_rename(tp);
3156 out_trans_abort:
3157 xfs_trans_cancel(tp);
3158 return error;
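/*
 * Net effect of the exchange above (hypothetical entries):
 *
 *	before:	dp1/name1 -> ip1,	dp2/name2 -> ip2
 *	after:	dp1/name1 -> ip2,	dp2/name2 -> ip1
 *
 * with ".." entries and parent link counts fixed up whenever ip1 and/or
 * ip2 are directories moving between different parents.
 */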
3162 * xfs_rename_alloc_whiteout()
3164 * Return a referenced, unlinked, unlocked inode that can be used as a
3165 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3166 * crash between allocating the inode and linking it into the rename transaction,
3167 * recovery will free the inode and we won't leak it.
3169 static int
3170 xfs_rename_alloc_whiteout(
3171 struct xfs_inode *dp,
3172 struct xfs_inode **wip)
3174 struct xfs_inode *tmpfile;
3177 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3178 if (error)
3179 return error;
3182 * Prepare the tmpfile inode as if it were created through the VFS.
3183 * Complete the inode setup and flag it as linkable. nlink is already
3184 * zero, so we can skip the drop_nlink.
3186 xfs_setup_iops(tmpfile);
3187 xfs_finish_inode_setup(tmpfile);
3188 VFS_I(tmpfile)->i_state |= I_LINKABLE;
3190 *wip = tmpfile;
3191 return 0;
3197 int
3198 xfs_rename(
3199 struct xfs_inode *src_dp,
3200 struct xfs_name *src_name,
3201 struct xfs_inode *src_ip,
3202 struct xfs_inode *target_dp,
3203 struct xfs_name *target_name,
3204 struct xfs_inode *target_ip,
3207 struct xfs_mount *mp = src_dp->i_mount;
3208 struct xfs_trans *tp;
3209 struct xfs_inode *wip = NULL; /* whiteout inode */
3210 struct xfs_inode *inodes[__XFS_SORT_INODES];
3211 struct xfs_buf *agibp;
3212 int num_inodes = __XFS_SORT_INODES;
3213 bool new_parent = (src_dp != target_dp);
3214 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3218 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3220 if ((flags & RENAME_EXCHANGE) && !target_ip)
3221 return -EINVAL;
3224 * If we are doing a whiteout operation, allocate the whiteout inode
3225 * we will be placing at the target and ensure the type is set
3228 if (flags & RENAME_WHITEOUT) {
3229 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
3230 error = xfs_rename_alloc_whiteout(target_dp, &wip);
3231 if (error)
3232 return error;
3234 /* setup target dirent info as whiteout */
3235 src_name->type = XFS_DIR3_FT_CHRDEV;
3238 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3239 inodes, &num_inodes);
3241 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3242 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3243 if (error == -ENOSPC) {
3244 spaceres = 0;
3245 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3246 &tp);
3248 if (error)
3249 goto out_release_wip;
3252 * Attach the dquots to the inodes
3254 error = xfs_qm_vop_rename_dqattach(inodes);
3255 if (error)
3256 goto out_trans_cancel;
3259 * Lock all the participating inodes. Depending upon whether
3260 * the target_name exists in the target directory, and
3261 * whether the target directory is the same as the source
3262 * directory, we can lock from 2 to 4 inodes.
3264 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3267 * Join all the inodes to the transaction. From this point on,
3268 * we can rely on either trans_commit or trans_cancel to unlock
3271 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3272 if (new_parent)
3273 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3274 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3275 if (target_ip)
3276 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3277 if (wip)
3278 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3281 * If we are using project inheritance, we only allow renames
3282 * into our tree when the project IDs are the same; else the
3283 * tree quota mechanism would be circumvented.
3285 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3286 target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3287 error = -EXDEV;
3288 goto out_trans_cancel;
3291 /* RENAME_EXCHANGE is unique from here on. */
3292 if (flags & RENAME_EXCHANGE)
3293 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3294 target_dp, target_name, target_ip,
3295 spaceres);
3298 * Check for expected errors before we dirty the transaction
3299 * so we can return an error without a transaction abort.
3301 if (target_ip == NULL) {
3303 * If there's no space reservation, check the entry will
3304 * fit before actually inserting it.
3306 if (!spaceres) {
3307 error = xfs_dir_canenter(tp, target_dp, target_name);
3308 if (error)
3309 goto out_trans_cancel;
3313 * If target exists and it's a directory, check whether
3314 * it can be destroyed.
3316 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3317 (!xfs_dir_isempty(target_ip) ||
3318 (VFS_I(target_ip)->i_nlink > 2))) {
3319 error = -EEXIST;
3320 goto out_trans_cancel;
3325 * Directory entry creation below may acquire the AGF. Remove
3326 * the whiteout from the unlinked list first to preserve correct
3327 * AGI/AGF locking order. This dirties the transaction so failures
3328 * after this point will abort and log recovery will clean up the
3329 * mess.
3331 * For whiteouts, we need to bump the link count on the whiteout
3332 * inode. After this point, we have a real link, clear the tmpfile
3333 * state flag from the inode so it doesn't accidentally get misused
3334 * in future.
3336 if (wip) {
3337 ASSERT(VFS_I(wip)->i_nlink == 0);
3338 error = xfs_iunlink_remove(tp, wip);
3339 if (error)
3340 goto out_trans_cancel;
3342 xfs_bumplink(tp, wip);
3343 VFS_I(wip)->i_state &= ~I_LINKABLE;
3347 * Set up the target.
3349 if (target_ip == NULL) {
3351 * If target does not exist and the rename crosses
3352 * directories, adjust the target directory link count
3353 * to account for the ".." reference from the new entry.
3355 error = xfs_dir_createname(tp, target_dp, target_name,
3356 src_ip->i_ino, spaceres);
3357 if (error)
3358 goto out_trans_cancel;
3360 xfs_trans_ichgtime(tp, target_dp,
3361 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3363 if (new_parent && src_is_directory) {
3364 xfs_bumplink(tp, target_dp);
3366 } else { /* target_ip != NULL */
3368 * Link the source inode under the target name.
3369 * If the source inode is a directory and we are moving
3370 * it across directories, its ".." entry will be
3371 * inconsistent until we replace that down below.
3373 * In case there is already an entry with the same
3374 * name at the destination directory, remove it first.
3378 * Check whether the replace operation will need to allocate
3379 * blocks. This happens when the shortform directory lacks
3380 * space and we have to convert it to a block format directory.
3381 * When more blocks are necessary, we must lock the AGI first
3382 * to preserve locking order (AGI -> AGF).
3384 if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
3385 error = xfs_read_agi(mp, tp,
3386 XFS_INO_TO_AGNO(mp, target_ip->i_ino),
3387 &agibp);
3388 if (error)
3389 goto out_trans_cancel;
3392 error = xfs_dir_replace(tp, target_dp, target_name,
3393 src_ip->i_ino, spaceres);
3394 if (error)
3395 goto out_trans_cancel;
3397 xfs_trans_ichgtime(tp, target_dp,
3398 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3401 * Decrement the link count on the target since the target
3402 * dir no longer points to it.
3404 error = xfs_droplink(tp, target_ip);
3405 if (error)
3406 goto out_trans_cancel;
3408 if (src_is_directory) {
3410 * Drop the link from the old "." entry.
3412 error = xfs_droplink(tp, target_ip);
3413 if (error)
3414 goto out_trans_cancel;
3416 } /* target_ip != NULL */
3419 * Remove the source.
3421 if (new_parent && src_is_directory) {
3423 * Rewrite the ".." entry to point to the new
3426 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3427 target_dp->i_ino, spaceres);
3428 ASSERT(error != -EEXIST);
3429 if (error)
3430 goto out_trans_cancel;
3434 * We always want to hit the ctime on the source inode.
3436 * This isn't strictly required by the standards since the source
3437 * inode isn't really being changed, but old unix file systems did
3438 * it and some incremental backup programs won't work without it.
3440 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3441 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3444 * Adjust the link count on src_dp. This is necessary when
3445 * renaming a directory, either within one parent when
3446 * the target existed, or across two parent directories.
3448 if (src_is_directory && (new_parent || target_ip != NULL)) {
3451 * Decrement link count on src_directory since the
3452 * entry that's moved no longer points to it.
3454 error = xfs_droplink(tp, src_dp);
3455 if (error)
3456 goto out_trans_cancel;
3460 * For whiteouts, we only need to update the source dirent with the
3461 * inode number of the whiteout inode rather than removing it
3462 * altogether.
3464 if (wip)
3465 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3466 spaceres);
3467 else
3468 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3469 spaceres);
3470 if (error)
3471 goto out_trans_cancel;
3473 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3474 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3475 if (new_parent)
3476 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3478 error = xfs_finish_rename(tp);
3479 if (wip)
3480 xfs_irele(wip);
3481 return error;
3483 out_trans_cancel:
3484 xfs_trans_cancel(tp);
3485 out_release_wip:
3486 if (wip)
3487 xfs_irele(wip);
3488 return error;
3491 STATIC int
3492 xfs_iflush_cluster(
3493 struct xfs_inode *ip,
3494 struct xfs_buf *bp)
3496 struct xfs_mount *mp = ip->i_mount;
3497 struct xfs_perag *pag;
3498 unsigned long first_index, mask;
3500 struct xfs_inode **cilist;
3501 struct xfs_inode *cip;
3502 struct xfs_ino_geometry *igeo = M_IGEO(mp);
3508 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
3510 cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *);
3511 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3515 mask = ~(igeo->inodes_per_cluster - 1);
3516 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3518 /* really need a gang lookup range call here */
3519 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
3520 first_index, igeo->inodes_per_cluster);
3524 for (i = 0; i < nr_found; i++) {
3525 cip = cilist[i];
3526 if (cip == ip)
3527 continue;
3530 * because this is an RCU protected lookup, we could find a
3531 * recently freed or even reallocated inode during the lookup.
3532 * We need to check under the i_flags_lock for a valid inode
3533 * here. Skip it if it is not valid or the wrong inode.
3535 spin_lock(&cip->i_flags_lock);
3536 if (!cip->i_ino ||
3537 __xfs_iflags_test(cip, XFS_ISTALE)) {
3538 spin_unlock(&cip->i_flags_lock);
3539 continue;
3543 * Once we fall off the end of the cluster, no point checking
3544 * any more inodes in the list because they will also all be
3545 * outside the cluster.
3547 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3548 spin_unlock(&cip->i_flags_lock);
3549 break;
3551 spin_unlock(&cip->i_flags_lock);
3554 * Do an un-protected check to see if the inode is dirty and
3555 * is a candidate for flushing. These checks will be repeated
3556 * later after the appropriate locks are acquired.
3558 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
3562 * Try to get locks. If any are unavailable or it is pinned,
3563 * then this inode cannot be flushed and is skipped.
3566 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
3567 continue;
3568 if (!xfs_iflock_nowait(cip)) {
3569 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3570 continue;
3572 if (xfs_ipincount(cip)) {
3573 xfs_ifunlock(cip);
3574 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3575 continue;
3580 * Check the inode number again, just to be certain we are not
3581 * racing with freeing in xfs_reclaim_inode(). See the comments
3582 * in that function for more information as to why the initial
3583 * check is not sufficient.
3587 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3592 * arriving here means that this inode can be flushed. First
3593 * re-check that it's dirty before flushing.
3595 if (!xfs_inode_clean(cip)) {
3596 error = xfs_iflush_int(cip, bp);
3598 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3605 xfs_iunlock(cip, XFS_ILOCK_SHARED);
3609 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3610 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
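/*
 * Worked example of the lookup window above, assuming a hypothetical
 * inodes_per_cluster of 32: mask = ~31, so an inode with agino 100
 * yields first_index = 96 and the gang lookup scans the cluster's
 * aginos 96..127.
 */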
3622 * Flush dirty inode metadata into the backing buffer.
3624 * The caller must have the inode lock and the inode flush lock held. The
3625 * inode lock will still be held upon return to the caller, and the inode
3626 * flush lock will be released after the inode has reached the disk.
3628 * The caller must write out the buffer returned in *bpp and release it.
3630 int
3631 xfs_iflush(
3632 struct xfs_inode *ip,
3633 struct xfs_buf **bpp)
3635 struct xfs_mount *mp = ip->i_mount;
3636 struct xfs_buf *bp = NULL;
3637 struct xfs_dinode *dip;
3640 XFS_STATS_INC(mp, xs_iflush_count);
3642 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3643 ASSERT(xfs_isiflocked(ip));
3644 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3645 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3649 xfs_iunpin_wait(ip);
3652 * For stale inodes we cannot rely on the backing buffer remaining
3653 * stale in cache for the remaining life of the stale inode and so
3654 * xfs_imap_to_bp() below may give us a buffer that no longer contains
3655 * inodes below. We have to check this after ensuring the inode is
3656 * unpinned so that it is safe to reclaim the stale inode after the
3657 * flush lock is released.
3659 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3660 xfs_ifunlock(ip);
3661 return 0;
3662 }
3665 * Get the buffer containing the on-disk inode. We are doing a try-lock
3666 * operation here, so we may get an EAGAIN error. In that case, return
3667 * leaving the inode dirty.
3669 * If we get any other error, we effectively have a corruption situation
3670 * and we cannot flush the inode. Abort the flush and shut down.
3672 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK);
3673 if (error == -EAGAIN) {
3674 xfs_ifunlock(ip);
3675 return error;
3676 }
3677 if (error)
3678 goto abort;
3681 * If the buffer is pinned then push on the log now so we won't
3682 * get stuck waiting in the write for too long.
3684 if (xfs_buf_ispinned(bp))
3685 xfs_log_force(mp, 0);
3688 * Flush the provided inode then attempt to gather others from the
3689 * cluster into the write.
3691 * Note: Once we attempt to flush an inode, we must run buffer
3692 * completion callbacks on any failure. If this fails, simulate an I/O
3693 * failure on the buffer and shut down.
3695 error = xfs_iflush_int(ip, bp);
3696 if (!error)
3697 error = xfs_iflush_cluster(ip, bp);
3698 if (error) {
3699 bp->b_flags |= XBF_ASYNC;
3700 xfs_buf_ioend_fail(bp);
3701 goto shutdown;
3704 *bpp = bp;
3705 return 0;
3707 abort:
3708 xfs_iflush_abort(ip);
3709 shutdown:
3710 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3711 return error;
3714 STATIC int
3715 xfs_iflush_int(
3716 struct xfs_inode *ip,
3717 struct xfs_buf *bp)
3719 struct xfs_inode_log_item *iip = ip->i_itemp;
3720 struct xfs_dinode *dip;
3721 struct xfs_mount *mp = ip->i_mount;
3724 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3725 ASSERT(xfs_isiflocked(ip));
3726 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3727 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3728 ASSERT(iip != NULL && iip->ili_fields != 0);
3730 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3733 * We don't flush the inode if any of the following checks fail, but we
3734 * do still update the log item and attach to the backing buffer as if
3735 * the flush happened. This is a formality to facilitate predictable
3736 * error handling as the caller will shutdown and fail the buffer.
3738 error = -EFSCORRUPTED;
3739 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3740 mp, XFS_ERRTAG_IFLUSH_1)) {
3741 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3742 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3743 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3746 if (S_ISREG(VFS_I(ip)->i_mode)) {
3747 if (XFS_TEST_ERROR(
3748 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3749 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3750 mp, XFS_ERRTAG_IFLUSH_3)) {
3751 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3752 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3753 __func__, ip->i_ino, ip);
3756 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3757 if (XFS_TEST_ERROR(
3758 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3759 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3760 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3761 mp, XFS_ERRTAG_IFLUSH_4)) {
3762 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3763 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3764 __func__, ip->i_ino, ip);
3768 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3769 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3770 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3771 "%s: detected corrupt incore inode %Lu, "
3772 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3773 __func__, ip->i_ino,
3774 ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3775 ip->i_d.di_nblocks, ip);
3778 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3779 mp, XFS_ERRTAG_IFLUSH_6)) {
3780 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3781 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3782 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3787 * Inode item log recovery for v2 inodes is dependent on the
3788 * di_flushiter count for correct sequencing. We bump the flush
3789 * iteration count so we can detect flushes which postdate a log record
3790 * during recovery. This is redundant as we now log every change and
3791 * hence this can't happen but we need to still do it to ensure
3792 * backwards compatibility with old kernels that predate logging all
3793 * inode changes.
3795 if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3796 ip->i_d.di_flushiter++;
3799 * If there are inline format data / attr forks attached to this inode,
3800 * make sure they are not corrupt.
3802 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3803 xfs_ifork_verify_local_data(ip))
3805 if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3806 xfs_ifork_verify_local_attr(ip))
3810 * Copy the dirty parts of the inode into the on-disk inode. We always
3811 * copy out the core of the inode, because if the inode is dirty at all
3812 * the core must be.
3814 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3816 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3817 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3818 ip->i_d.di_flushiter = 0;
3820 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3821 if (XFS_IFORK_Q(ip))
3822 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3823 xfs_inobp_check(mp, bp);
3826 * We've recorded everything logged in the inode, so we'd like to clear
3827 * the ili_fields bits so we don't log and flush things unnecessarily.
3828 * However, we can't stop logging all this information until the data
3829 * we've copied into the disk buffer is written to disk. If we did we
3830 * might overwrite the copy of the inode in the log with all the data
3831 * after re-logging only part of it, and in the face of a crash we
3832 * wouldn't have all the data we need to recover.
3834 * What we do is move the bits to the ili_last_fields field. When
3835 * logging the inode, these bits are moved back to the ili_fields field.
3836 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3837 * know that the information those bits represent is permanently on
3838 * disk. As long as the flush completes before the inode is logged
3839 * again, then both ili_fields and ili_last_fields will be cleared.
3841 * We can play with the ili_fields bits here, because the inode lock
3842 * must be held exclusively in order to set bits there and the flush
3843 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3844 * done routine can tell whether or not to look in the AIL. Also, store
3845 * the current LSN of the inode so that we can tell whether the item has
3846 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3847 * need the AIL lock, because it is a 64 bit value that cannot be read
3848 * atomically.
3852 iip->ili_last_fields = iip->ili_fields;
3853 iip->ili_fields = 0;
3854 iip->ili_fsync_fields = 0;
3855 iip->ili_logged = 1;
3857 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3858 &iip->ili_item.li_lsn);
3861 * Attach the inode item callback to the buffer whether the flush
3862 * succeeded or not. If not, the caller will shut down and fail I/O
3863 * completion on the buffer to remove the inode from the AIL and release
3864 * the flush lock.
3866 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3868 /* generate the checksum. */
3869 xfs_dinode_calc_crc(mp, dip);
3871 ASSERT(!list_empty(&bp->b_li_list));
3872 ASSERT(bp->b_iodone != NULL);
3876 /* Release an inode. */
3877 void
3878 xfs_irele(
3879 struct xfs_inode *ip)
3881 trace_xfs_irele(ip, _RET_IP_);
3882 iput(VFS_I(ip));
3886 * Ensure all committed transactions touching the inode are written to the log.
3888 int
3889 xfs_log_force_inode(
3890 struct xfs_inode *ip)
3892 xfs_lsn_t lsn = 0;
3894 xfs_ilock(ip, XFS_ILOCK_SHARED);
3895 if (xfs_ipincount(ip))
3896 lsn = ip->i_itemp->ili_last_lsn;
3897 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3899 if (!lsn)
3900 return 0;
3901 return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL);
3905 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3906 * abide vfs locking order (lowest pointer value goes first) and breaking the
3907 * layout leases before proceeding. The loop is needed because we cannot call
3908 * the blocking break_layout() with the iolocks held, and therefore have to
3909 * back out both locks.
3911 static int
3912 xfs_iolock_two_inodes_and_break_layout(
3913 struct inode *src,
3914 struct inode *dest)
3922 /* Wait to break both inodes' layouts before we start locking. */
3923 error = break_layout(src, true);
3924 if (error)
3925 return error;
3926 if (src != dest) {
3927 error = break_layout(dest, true);
3928 if (error)
3929 return error;
3932 /* Lock one inode and make sure nobody got in and leased it. */
3933 inode_lock(src);
3934 error = break_layout(src, false);
3937 if (error == -EWOULDBLOCK)
3945 /* Lock the other inode and make sure nobody got in and leased it. */
3946 inode_lock_nested(dest, I_MUTEX_NONDIR2);
3947 error = break_layout(dest, false);
3951 if (error == -EWOULDBLOCK)
3960 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3961 * mmap activity.
3963 int
3964 xfs_ilock2_io_mmap(
3965 struct xfs_inode *ip1,
3966 struct xfs_inode *ip2)
3970 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3971 if (ret)
3972 return ret;
3973 if (ip1 == ip2)
3974 xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3975 else
3976 xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3977 ip2, XFS_MMAPLOCK_EXCL);
3978 return 0;
3981 /* Unlock both inodes to allow IO and mmap activity. */
3982 void
3983 xfs_iunlock2_io_mmap(
3984 struct xfs_inode *ip1,
3985 struct xfs_inode *ip2)
3987 bool same_inode = (ip1 == ip2);
3989 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3990 if (!same_inode)
3991 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3992 inode_unlock(VFS_I(ip2));
3993 if (!same_inode)
3994 inode_unlock(VFS_I(ip1));
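/*
 * Usage sketch (hypothetical caller, e.g. a data copy between two
 * files): the pair above brackets the whole operation so that neither
 * syscall I/O nor page faults can touch either inode mid-operation.
 */
static inline int
xfs_two_inode_op_sketch(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	/* ... operate on both inodes with I/O and faults excluded ... */

	xfs_iunlock2_io_mmap(ip1, ip2);
	return 0;
}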