Merge tag 'xfs-5.20-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
[linux-2.6-microblaze.git] / fs / xfs / xfs_file.c
index 8d9b14d..c6c8026 100644 (file)
@@ -25,6 +25,7 @@
 #include "xfs_iomap.h"
 #include "xfs_reflink.h"
 
+#include <linux/dax.h>
 #include <linux/falloc.h>
 #include <linux/backing-dev.h>
 #include <linux/mman.h>
@@ -142,7 +143,7 @@ xfs_file_fsync(
 {
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
-       int                     error = 0;
+       int                     error, err2;
        int                     log_flushed = 0;
 
        trace_xfs_file_fsync(ip);
@@ -163,18 +164,21 @@ xfs_file_fsync(
         * inode size in case of an extending write.
         */
        if (XFS_IS_REALTIME_INODE(ip))
-               blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
+               error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
        else if (mp->m_logdev_targp != mp->m_ddev_targp)
-               blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+               error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 
        /*
         * Any inode that has dirty modifications in the log is pinned.  The
-        * racy check here for a pinned inode while not catch modifications
+        * racy check here for a pinned inode will not catch modifications
         * that happen concurrently to the fsync call, but fsync semantics
         * only require to sync previously completed I/O.
         */
-       if (xfs_ipincount(ip))
-               error = xfs_fsync_flush_log(ip, datasync, &log_flushed);
+       if (xfs_ipincount(ip)) {
+               err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
+               if (err2 && !error)
+                       error = err2;
+       }
 
        /*
         * If we only have a single device, and the log force above was
@@ -184,8 +188,11 @@ xfs_file_fsync(
         * commit.
         */
        if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
-           mp->m_logdev_targp == mp->m_ddev_targp)
-               blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+           mp->m_logdev_targp == mp->m_ddev_targp) {
+               err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+               if (err2 && !error)
+                       error = err2;
+       }
 
        return error;
 }
@@ -669,7 +676,7 @@ xfs_file_dax_write(
        pos = iocb->ki_pos;
 
        trace_xfs_file_dax_write(iocb, from);
-       ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
+       ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                error = xfs_setfilesize(ip, pos, ret);
@@ -806,7 +813,7 @@ xfs_wait_dax_page(
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 }
 
-static int
+int
 xfs_break_dax_layouts(
        struct inode            *inode,
        bool                    *retry)
@@ -1253,6 +1260,31 @@ xfs_file_llseek(
        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 }
 
+#ifdef CONFIG_FS_DAX
+static int
+xfs_dax_fault(
+       struct vm_fault         *vmf,
+       enum page_entry_size    pe_size,
+       bool                    write_fault,
+       pfn_t                   *pfn)
+{
+       return dax_iomap_fault(vmf, pe_size, pfn, NULL,
+                       (write_fault && !vmf->cow_page) ?
+                               &xfs_dax_write_iomap_ops :
+                               &xfs_read_iomap_ops);
+}
+#else
+static int
+xfs_dax_fault(
+       struct vm_fault         *vmf,
+       enum page_entry_size    pe_size,
+       bool                    write_fault,
+       pfn_t                   *pfn)
+{
+       return 0;
+}
+#endif
+
 /*
  * Locking for serialisation of IO during page faults. This results in a lock
  * ordering of:
@@ -1284,10 +1316,7 @@ __xfs_filemap_fault(
                pfn_t pfn;
 
                xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-               ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
-                               (write_fault && !vmf->cow_page) ?
-                                &xfs_direct_write_iomap_ops :
-                                &xfs_read_iomap_ops);
+               ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, pe_size, pfn);
                xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);