diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0937584..e552ce5 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -28,7 +28,6 @@
 #include "xfs_dquot.h"
 #include "xfs_reflink.h"
 
-
 #define XFS_ALLOC_ALIGN(mp, off) \
        (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
 
@@ -54,7 +53,8 @@ xfs_bmbt_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        struct xfs_bmbt_irec    *imap,
-       u16                     flags)
+       unsigned int            mapping_flags,
+       u16                     iomap_flags)
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
@@ -71,16 +71,22 @@ xfs_bmbt_to_iomap(
                iomap->type = IOMAP_DELALLOC;
        } else {
                iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
+               if (mapping_flags & IOMAP_DAX)
+                       iomap->addr += target->bt_dax_part_off;
+
                if (imap->br_state == XFS_EXT_UNWRITTEN)
                        iomap->type = IOMAP_UNWRITTEN;
                else
                        iomap->type = IOMAP_MAPPED;
+
        }
        iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
        iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
-       iomap->bdev = target->bt_bdev;
-       iomap->dax_dev = target->bt_daxdev;
-       iomap->flags = flags;
+       if (mapping_flags & IOMAP_DAX)
+               iomap->dax_dev = target->bt_daxdev;
+       else
+               iomap->bdev = target->bt_bdev;
+       iomap->flags = iomap_flags;
 
        if (xfs_ipincount(ip) &&
            (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
@@ -188,6 +194,7 @@ xfs_iomap_write_direct(
        struct xfs_inode        *ip,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           count_fsb,
+       unsigned int            flags,
        struct xfs_bmbt_irec    *imap)
 {
        struct xfs_mount        *mp = ip->i_mount;
@@ -229,7 +236,7 @@ xfs_iomap_write_direct(
         * the reserve block pool for bmbt block allocation if there is no space
         * left but we need to do unwritten extent conversion.
         */
-       if (IS_DAX(VFS_I(ip))) {
+       if (flags & IOMAP_DAX) {
                bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
                if (imap->br_state == XFS_EXT_UNWRITTEN) {
                        force = true;
@@ -620,7 +627,7 @@ imap_needs_alloc(
            imap->br_startblock == DELAYSTARTBLOCK)
                return true;
        /* we convert unwritten extents before copying the data for DAX */
-       if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+       if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
                return true;
        return false;
 }
@@ -800,7 +807,7 @@ xfs_direct_write_iomap_begin(
 
        xfs_iunlock(ip, lockmode);
        trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);
 
 allocate_blocks:
        error = -EAGAIN;
@@ -826,23 +833,24 @@ allocate_blocks:
        xfs_iunlock(ip, lockmode);
 
        error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
-                       &imap);
+                       flags, &imap);
        if (error)
                return error;
 
        trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+                                iomap_flags | IOMAP_F_NEW);
 
 out_found_cow:
        xfs_iunlock(ip, lockmode);
        length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
        trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
        if (imap.br_startblock != HOLESTARTBLOCK) {
-               error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+               error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
                if (error)
                        return error;
        }
-       return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+       return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);
 
 out_unlock:
        if (lockmode)
@@ -1052,23 +1060,24 @@ retry:
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);
 
 found_imap:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 
 found_cow:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        if (imap.br_startoff <= offset_fsb) {
-               error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+               error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
                if (error)
                        return error;
-               return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+               return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+                                        IOMAP_F_SHARED);
        }
 
        xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
-       return xfs_bmbt_to_iomap(ip, iomap, &cmap, 0);
+       return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);
 
 out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1177,7 +1186,8 @@ xfs_read_iomap_begin(
        if (error)
                return error;
        trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+                                shared ? IOMAP_F_SHARED : 0);
 }
 
 const struct iomap_ops xfs_read_iomap_ops = {
@@ -1236,7 +1246,8 @@ xfs_seek_iomap_begin(
                if (data_fsb < cow_fsb + cmap.br_blockcount)
                        end_fsb = min(end_fsb, data_fsb);
                xfs_trim_extent(&cmap, offset_fsb, end_fsb);
-               error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+               error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+                                         IOMAP_F_SHARED);
                /*
                 * This is a COW extent, so we must probe the page cache
                 * because there could be dirty page cache being backed
@@ -1258,7 +1269,7 @@ xfs_seek_iomap_begin(
        imap.br_state = XFS_EXT_NORM;
 done:
        xfs_trim_extent(&imap, offset_fsb, end_fsb);
-       error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+       error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
@@ -1305,9 +1316,40 @@ out_unlock:
        if (error)
                return error;
        ASSERT(nimaps);
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 }
 
 const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
 };
+
+int
+xfs_zero_range(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       loff_t                  len,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       if (IS_DAX(inode))
+               return dax_zero_range(inode, pos, len, did_zero,
+                                     &xfs_direct_write_iomap_ops);
+       return iomap_zero_range(inode, pos, len, did_zero,
+                               &xfs_buffered_write_iomap_ops);
+}
+
+int
+xfs_truncate_page(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       if (IS_DAX(inode))
+               return dax_truncate_page(inode, pos, did_zero,
+                                       &xfs_direct_write_iomap_ops);
+       return iomap_truncate_page(inode, pos, did_zero,
+                                  &xfs_buffered_write_iomap_ops);
+}