// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;
/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}
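/*
 * Worked example (illustrative, not part of the original source): on a
 * filesystem with 4096-byte blocks and no realtime device, mask is 0xfff.
 * pos = 8192, len = 4096 passes because (8192 | 4096) & 0xfff == 0, while
 * pos = 6144, len = 4096 fails because 6144 & 0xfff == 0x800. The
 * div_u64_rem() path exists for realtime extent sizes that are not powers
 * of two, where bitmasking cannot test divisibility.
 */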
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}
static xfs_lsn_t
xfs_fsync_lsn(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_last_lsn;
}
/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning. If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk. We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_lsn_t		lsn;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	lsn = xfs_fsync_lsn(ip, datasync);
	if (lsn) {
		error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}
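/*
 * Illustrative note (not part of the original source): if the inode was
 * last dirtied at LSN 100 and the log has already been written to disk up
 * to LSN 120, the xfs_log_force_lsn() call above finds nothing to do and
 * returns without issuing I/O; *log_flushed then stays clear, which is
 * what lets the caller decide to flush the data device cache itself.
 */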
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first. This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * Any inode that has dirty modifications in the log is pinned. The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require us to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip))
		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
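/*
 * Summary of the cache-flush decisions above (illustrative, not part of
 * the original source):
 *
 *   realtime inode            -> flush the RT device before the log force
 *   separate log device       -> flush the data device before the log force
 *   single device, log forced -> the log force's own write flushes the cache
 *   single device, no force   -> flush the data device as a last resort
 */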
static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}
STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}
STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write. If zeroing is needed and we are currently holding the
	 * iolock shared, we need to upgrade it to exclusive, which means we
	 * have to redo all the checks made earlier.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);

		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;

		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	return file_modified(file);
}
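/*
 * Worked example (illustrative, not part of the original source): with
 * i_size at 10000 bytes, a direct write at offset 16384 must first zero
 * the range [10000, 16384) via iomap_zero_range() above, so that a
 * subsequent read of the gap sees zeros rather than stale block contents.
 */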
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}
static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};
/*
 * Handle block aligned direct I/O writes
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	int			iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes. However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block. In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required. In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	int			iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
retry_exclusive:
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight. Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler. Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
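/*
 * Illustrative flow (not part of the original source): a 512-byte
 * overwrite in the middle of an existing 4096-byte block is first
 * submitted with IOLOCK_SHARED and IOMAP_DIO_OVERWRITE_ONLY; if the iomap
 * layer finds it would need sub-block zeroing or allocation, it returns
 * -EAGAIN and the write is resubmitted under IOLOCK_EXCL with
 * IOMAP_DIO_FORCE_WAIT.
 */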
static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}
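/*
 * Dispatch examples (illustrative, not part of the original source),
 * assuming 512-byte logical sectors and 4096-byte filesystem blocks:
 *
 *   pos = 4096, count = 4096 -> aligned path
 *   pos = 2048, count = 4096 -> unaligned path (block-unaligned, sector-ok)
 *   pos = 100,  count = 512  -> -EINVAL (not sector aligned)
 */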
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;
	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time. Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_eofblocks eofb = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW. In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}
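/*
 * Illustrative note (not part of the original source): the wait above
 * sleeps until the busy DAX page's refcount drops to 1, i.e. until all
 * get_user_pages() references from in-flight DMA or RDMA are released;
 * xfs_wait_dax_page() drops and retakes XFS_MMAPLOCK_EXCL around each
 * sleep so the reference holders can make progress.
 */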
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			fallthrough;
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}
#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
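/*
 * Illustrative usage (not part of the original source): a userspace caller
 * punching a 64k hole while keeping the file size would issue
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 65536);
 *
 * Any mode bits outside XFS_FALLOC_FL_SUPPORTED are rejected below with
 * -EOPNOTSUPP.
 */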
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range. We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 * 1.) Hole punch handles partial block zeroing for us.
			 * 2.) If prealloc returns ENOSPC, the file range is
			 * still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
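			/*
			 * Worked example (illustrative, not part of the
			 * original source): for offset = 1000, len = 3000 on
			 * a 4096-byte block size, the hole punch covers
			 * [1000, 4000) and the rounding above widens the
			 * preallocation to offset = 0, len = 4096 so the
			 * partially zeroed block is preallocated as well.
			 */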
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			/*
			 * In always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}
	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
					    file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}
	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int		ret;
	int		lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}
/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return 0;
}
STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}
STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}
STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem. With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size. For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}
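/*
 * Illustrative note (not part of the original source): for a directory
 * whose on-disk size is 100 bytes the estimate is simply 100, while very
 * large directories are capped at XFS_READDIR_BUFSIZE, so the dir2 code
 * never sizes its readahead window off a multi-gigabyte directory.
 */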
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
					&xfs_direct_write_iomap_ops :
					&xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}
static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}
static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			xfs_is_write_fault(vmf));
}
static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
static vm_fault_t
xfs_filemap_map_pages(
	struct vm_fault		*vmf,
	pgoff_t			start_pgoff,
	pgoff_t			end_pgoff)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	vm_fault_t		ret;

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	return ret;
}
static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= xfs_filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};
STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
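/*
 * Illustrative usage (not part of the original source): an application
 * requesting a synchronous DAX mapping would call
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * and daxdev_mapping_supported() above is what makes that request fail
 * with -EOPNOTSUPP when the backing dax_device cannot honour it.
 */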
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};
const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};