Merge tag 'xfs-5.20-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b3d1392..c6c8026 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -25,6 +25,7 @@
 #include "xfs_iomap.h"
 #include "xfs_reflink.h"
 
+#include <linux/dax.h>
 #include <linux/falloc.h>
 #include <linux/backing-dev.h>
 #include <linux/mman.h>
@@ -416,7 +417,7 @@ restart:
                spin_unlock(&ip->i_flags_lock);
 
 out:
-       return file_modified(file);
+       return kiocb_modified(iocb);
 }
 
 static int
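
Note on the hunk above: kiocb_modified() is the kiocb-aware replacement for
file_modified(); with IOCB_NOWAIT set it can return -EAGAIN instead of blocking
on the c/mtime update or setgid stripping. A minimal sketch of how a caller is
expected to treat it (illustrative names, not from this patch):

        #include <linux/fs.h>
        #include <linux/uio.h>

        /* Sketch: propagate -EAGAIN so io_uring can retry from a context that may block. */
        static ssize_t example_write_tail(struct kiocb *iocb, struct iov_iter *from)
        {
                int error = kiocb_modified(iocb);       /* may be -EAGAIN under IOCB_NOWAIT */

                if (error)
                        return error;
                return iov_iter_count(from);            /* stand-in for the real write */
        }
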
@@ -675,7 +676,7 @@ xfs_file_dax_write(
        pos = iocb->ki_pos;
 
        trace_xfs_file_dax_write(iocb, from);
-       ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
+       ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                error = xfs_setfilesize(ip, pos, ret);
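
xfs_dax_write_iomap_ops is the CoW-aware ops table introduced by the dax/reflink
work this merge pulls in. Assuming it follows the usual iomap pattern, the
difference from xfs_direct_write_iomap_ops is an ->iomap_end hook that remaps
COW-fork extents into the data fork once the DAX write completes; roughly:

        /* Sketch of the assumed shape (presumably defined alongside the other
         * ops tables in fs/xfs/xfs_iomap.c by the same series). */
        const struct iomap_ops xfs_dax_write_iomap_ops = {
                .iomap_begin    = xfs_direct_write_iomap_begin,
                .iomap_end      = xfs_dax_write_iomap_end,
        };
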
@@ -706,12 +707,11 @@ xfs_file_buffered_write(
        bool                    cleared_space = false;
        unsigned int            iolock;
 
-       if (iocb->ki_flags & IOCB_NOWAIT)
-               return -EOPNOTSUPP;
-
 write_retry:
        iolock = XFS_IOLOCK_EXCL;
-       xfs_ilock(ip, iolock);
+       ret = xfs_ilock_iocb(iocb, iolock);
+       if (ret)
+               return ret;
 
        ret = xfs_file_write_checks(iocb, from, &iolock);
        if (ret)
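
Dropping the -EOPNOTSUPP check and switching to xfs_ilock_iocb() is what lets
buffered writes honour IOCB_NOWAIT: the helper, already used by the direct and
DAX write paths earlier in this file, turns NOWAIT into a trylock. Roughly:

        /* Sketch of the helper's behaviour, not copied from this patch. */
        static int
        xfs_ilock_iocb(
                struct kiocb            *iocb,
                unsigned int            lock_mode)
        {
                struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (!xfs_ilock_nowait(ip, lock_mode))
                                return -EAGAIN;
                } else {
                        xfs_ilock(ip, lock_mode);
                }
                return 0;
        }
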
@@ -813,7 +813,7 @@ xfs_wait_dax_page(
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 }
 
-static int
+int
 xfs_break_dax_layouts(
        struct inode            *inode,
        bool                    *retry)
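
Making xfs_break_dax_layouts() non-static implies a matching prototype elsewhere
(presumably fs/xfs/xfs_inode.h) so the reflink remap path can break DAX layouts
before exchanging extents, e.g.:

        /* Assumed declaration added by the same series, e.g. in fs/xfs/xfs_inode.h: */
        int     xfs_break_dax_layouts(struct inode *inode, bool *retry);
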
@@ -1171,7 +1171,7 @@ xfs_file_open(
 {
        if (xfs_is_shutdown(XFS_M(inode->i_sb)))
                return -EIO;
-       file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+       file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
        return generic_file_open(inode, file);
 }
 
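
FMODE_BUF_WASYNC advertises that buffered writes on this file can honour
IOCB_NOWAIT, which io_uring checks before attempting an async buffered write.
A purely illustrative check (hypothetical helper, not io_uring's actual code):

        #include <linux/fs.h>

        /* Hypothetical: a caller wanting a non-blocking buffered write checks the flag. */
        static bool example_can_nowait_buffered_write(struct file *file)
        {
                return (file->f_mode & FMODE_BUF_WASYNC) != 0;
        }
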
@@ -1260,6 +1260,31 @@ xfs_file_llseek(
        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 }
 
+#ifdef CONFIG_FS_DAX
+static int
+xfs_dax_fault(
+       struct vm_fault         *vmf,
+       enum page_entry_size    pe_size,
+       bool                    write_fault,
+       pfn_t                   *pfn)
+{
+       return dax_iomap_fault(vmf, pe_size, pfn, NULL,
+                       (write_fault && !vmf->cow_page) ?
+                               &xfs_dax_write_iomap_ops :
+                               &xfs_read_iomap_ops);
+}
+#else
+static int
+xfs_dax_fault(
+       struct vm_fault         *vmf,
+       enum page_entry_size    pe_size,
+       bool                    write_fault,
+       pfn_t                   *pfn)
+{
+       return 0;
+}
+#endif
+
 /*
  * Locking for serialisation of IO during page faults. This results in a lock
  * ordering of:
@@ -1291,10 +1316,7 @@ __xfs_filemap_fault(
                pfn_t pfn;
 
                xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-               ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
-                               (write_fault && !vmf->cow_page) ?
-                                &xfs_direct_write_iomap_ops :
-                                &xfs_read_iomap_ops);
+               ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, pe_size, pfn);
                xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
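
With xfs_dax_fault() in place, DAX write faults that are not CoW-page faults go
through the new xfs_dax_write_iomap_ops as well, so mmap writes get the same
copy-on-write handling as the write() path. For context, the PTE-sized fault
entry point in this file reaches __xfs_filemap_fault() roughly like this
(sketch, not part of the hunks above):

        /* Sketch: write_fault is derived from IS_DAX() and the fault flags. */
        static vm_fault_t
        xfs_filemap_fault(
                struct vm_fault         *vmf)
        {
                return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
                                IS_DAX(file_inode(vmf->vma->vm_file)) &&
                                (vmf->flags & FAULT_FLAG_WRITE));
        }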