index 649ff51..c440dce 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -334,13 +334,35 @@ static unsigned long dax_end_pfn(void *entry)
        for (pfn = dax_to_pfn(entry); \
                        pfn < dax_end_pfn(entry); pfn++)
 
+static inline bool dax_mapping_is_cow(struct address_space *mapping)
+{
+       return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
+}
+
 /*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
+ * Set page->mapping to PAGE_MAPPING_DAX_COW and increase the refcount that
+ * is kept in page->index.
+ */
+static inline void dax_mapping_set_cow(struct page *page)
+{
+       if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
+               /*
+                * Reset the index if the page was already mapped
+                * regularly before.
+                */
+               if (page->mapping)
+                       page->index = 1;
+               page->mapping = (void *)PAGE_MAPPING_DAX_COW;
+       }
+       page->index++;
+}
+
+/*
+ * When called from dax_insert_entry(), the cow flag indicates whether this
+ * entry is shared by multiple files.  If so, set page->mapping to
+ * PAGE_MAPPING_DAX_COW and use page->index as the refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-               struct vm_area_struct *vma, unsigned long address)
+               struct vm_area_struct *vma, unsigned long address, bool cow)
 {
        unsigned long size = dax_entry_size(entry), pfn, index;
        int i = 0;
@@ -352,9 +374,13 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
-               WARN_ON_ONCE(page->mapping);
-               page->mapping = mapping;
-               page->index = index + i++;
+               if (cow) {
+                       dax_mapping_set_cow(page);
+               } else {
+                       WARN_ON_ONCE(page->mapping);
+                       page->mapping = mapping;
+                       page->index = index + i++;
+               }
        }
 }
 
@@ -370,7 +396,12 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                struct page *page = pfn_to_page(pfn);
 
                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-               WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+               if (dax_mapping_is_cow(page->mapping)) {
+                       /* keep the CoW flag if this page is still shared */
+                       if (page->index-- > 0)
+                               continue;
+               } else
+                       WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
                page->index = 0;
        }
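
/*
 * Illustrative sketch (editorial note, not part of this change): for a page
 * that has not been associated before and now backs an extent shared by two
 * files, the helpers above behave roughly like this (placeholder arguments):
 *
 *	dax_associate_entry(entry, mapping_a, vma_a, addr_a, true);
 *		// page->mapping = PAGE_MAPPING_DAX_COW, page->index = 1
 *	dax_associate_entry(entry, mapping_b, vma_b, addr_b, true);
 *		// still CoW-marked, page->index = 2
 *	dax_disassociate_entry(entry, mapping_a, false);
 *		// page->index is decremented, the CoW marker is kept
 *
 * A non-CoW association instead stores the owning mapping in page->mapping
 * and warns if one is already set.
 */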
@@ -455,6 +486,69 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
        dax_unlock_entry(&xas, (void *)cookie);
 }
 
+/**
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
+ * @mapping: the file's mapping whose entry we want to lock
+ * @index: the page index within this file
+ * @page: output the dax page corresponding to this dax entry
+ *
+ * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
+ * could not be locked.
+ */
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
+               struct page **page)
+{
+       XA_STATE(xas, NULL, 0);
+       void *entry;
+
+       rcu_read_lock();
+       for (;;) {
+               entry = NULL;
+               if (!dax_mapping(mapping))
+                       break;
+
+               xas.xa = &mapping->i_pages;
+               xas_lock_irq(&xas);
+               xas_set(&xas, index);
+               entry = xas_load(&xas);
+               if (dax_is_locked(entry)) {
+                       rcu_read_unlock();
+                       wait_entry_unlocked(&xas, entry);
+                       rcu_read_lock();
+                       continue;
+               }
+               if (!entry ||
+                   dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+                       /*
+                        * Because we look the entry up by the file's mapping
+                        * and index, it may not have been inserted yet, or it
+                        * may be a zero/empty entry.  This is not an error
+                        * case, so return a special value and do not output
+                        * @page.
+                        */
+                       entry = (void *)~0UL;
+               } else {
+                       *page = pfn_to_page(dax_to_pfn(entry));
+                       dax_lock_entry(&xas, entry);
+               }
+               xas_unlock_irq(&xas);
+               break;
+       }
+       rcu_read_unlock();
+       return (dax_entry_t)entry;
+}
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
+               dax_entry_t cookie)
+{
+       XA_STATE(xas, &mapping->i_pages, index);
+
+       if (cookie == ~0UL)
+               return;
+
+       dax_unlock_entry(&xas, (void *)cookie);
+}
+
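
/*
 * Illustrative sketch (editorial note, not part of this change): a caller
 * that has resolved a pfn to a (mapping, index) pair could drive the two
 * helpers above roughly as below; handle_dax_page() is a hypothetical
 * placeholder for the caller's own processing:
 *
 *	struct page *page = NULL;
 *	dax_entry_t cookie;
 *
 *	cookie = dax_lock_mapping_entry(mapping, index, &page);
 *	if (!cookie)
 *		return -EBUSY;			// entry could not be locked
 *	if (cookie != (dax_entry_t)~0UL)
 *		handle_dax_page(page);		// hypothetical helper
 *	dax_unlock_mapping_entry(mapping, index, cookie);
 *	return 0;
 */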
 /*
  * Find page cache entry at given index. If it is a DAX entry, return it
  * with the entry locked. If the page cache doesn't contain an entry at
@@ -735,6 +829,23 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter
        return 0;
 }
 
+/*
+ * MAP_SYNC on a dax mapping guarantees dirty metadata is
+ * flushed on write-faults (non-cow), but not read-faults.
+ */
+static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
+               struct vm_area_struct *vma)
+{
+       return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
+               (iter->iomap.flags & IOMAP_F_DIRTY);
+}
+
+static bool dax_fault_is_cow(const struct iomap_iter *iter)
+{
+       return (iter->flags & IOMAP_WRITE) &&
+               (iter->iomap.flags & IOMAP_F_SHARED);
+}
+
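
/*
 * Example (editorial note): on a write fault into a reflinked extent the
 * fault path sets IOMAP_WRITE in iter->flags and the filesystem's
 * ->iomap_begin reports the extent with IOMAP_F_SHARED, so dax_fault_is_cow()
 * returns true; dax_fault_is_synchronous() additionally requires a MAP_SYNC
 * vma (VM_SYNC) and an iomap carrying IOMAP_F_DIRTY.
 */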
 /*
  * By this point grab_mapping_entry() has ensured that we have a locked entry
  * of the appropriate size so we don't have to worry about downgrading PMDs to
@@ -742,16 +853,19 @@ static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_entry(struct xa_state *xas,
-               struct address_space *mapping, struct vm_fault *vmf,
-               void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
+               const struct iomap_iter *iter, void *entry, pfn_t pfn,
+               unsigned long flags)
 {
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        void *new_entry = dax_make_entry(pfn, flags);
+       bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
+       bool cow = dax_fault_is_cow(iter);
 
        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-       if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+       if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
                unsigned long index = xas->xa_index;
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
@@ -763,11 +877,12 @@ static void *dax_insert_entry(struct xa_state *xas,
 
        xas_reset(xas);
        xas_lock_irq(xas);
-       if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+       if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                void *old;
 
                dax_disassociate_entry(entry, mapping, false);
-               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+                               cow);
                /*
                 * Only swap our new entry into the page cache if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
@@ -787,6 +902,9 @@ static void *dax_insert_entry(struct xa_state *xas,
        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 
+       if (cow)
+               xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
+
        xas_unlock_irq(xas);
        return entry;
 }
@@ -931,20 +1049,22 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
-static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
-                        pfn_t *pfnp)
+static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
+               size_t size, void **kaddr, pfn_t *pfnp)
 {
        pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
-       int id, rc;
+       int id, rc = 0;
        long length;
 
        id = dax_read_lock();
        length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
-                                  DAX_ACCESS, NULL, pfnp);
+                                  DAX_ACCESS, kaddr, pfnp);
        if (length < 0) {
                rc = length;
                goto out;
        }
+       if (!pfnp)
+               goto out_check_addr;
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
@@ -954,11 +1074,71 @@ static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
        if (length > 1 && !pfn_t_devmap(*pfnp))
                goto out;
        rc = 0;
+
+out_check_addr:
+       if (!kaddr)
+               goto out;
+       if (!*kaddr)
+               rc = -EFAULT;
 out:
        dax_read_unlock(id);
        return rc;
 }
 
+/**
+ * dax_iomap_cow_copy - Copy the data from source to destination before write
+ * @pos:       file offset to copy from.
+ * @length:    size of the copy operation.
+ * @align_size:        alignment of the copy (either PMD_SIZE or PAGE_SIZE)
+ * @srcmap:    iomap srcmap
+ * @daddr:     destination address to copy to.
+ *
+ * This can be called from two places: either during a DAX write fault (page
+ * aligned), to copy @length bytes of data to @daddr, or during a normal DAX
+ * write, where dax_iomap_actor() may call it to copy the unaligned head or
+ * tail of the range.  In the latter case the copy of the aligned portion is
+ * handled by dax_iomap_actor() itself.
+ */
+static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
+               const struct iomap *srcmap, void *daddr)
+{
+       loff_t head_off = pos & (align_size - 1);
+       size_t size = ALIGN(head_off + length, align_size);
+       loff_t end = pos + length;
+       loff_t pg_end = round_up(end, align_size);
+       bool copy_all = head_off == 0 && end == pg_end;
+       void *saddr = NULL;
+       int ret = 0;
+
+       ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
+       if (ret)
+               return ret;
+
+       if (copy_all) {
+               ret = copy_mc_to_kernel(daddr, saddr, length);
+               return ret ? -EIO : 0;
+       }
+
+       /* Copy the head part of the range */
+       if (head_off) {
+               ret = copy_mc_to_kernel(daddr, saddr, head_off);
+               if (ret)
+                       return -EIO;
+       }
+
+       /* Copy the tail part of the range */
+       if (end < pg_end) {
+               loff_t tail_off = head_off + length;
+               loff_t tail_len = pg_end - end;
+
+               ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
+                                       tail_len);
+               if (ret)
+                       return -EIO;
+       }
+       return 0;
+}
+
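
/*
 * Worked example (editorial note, assuming a 4096-byte PAGE_SIZE): for an
 * unaligned write with pos = 1536 and length = 1024,
 *
 *	head_off = 1536, end = 2560, pg_end = 4096, size = 4096
 *
 * so the head copy moves bytes [0, 1536) of the page from the source to
 * @daddr, the tail copy moves bytes [2560, 4096), and the caller then writes
 * the new data into [1536, 2560) itself.
 */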
 /*
  * The user has performed a load from a hole in the file.  Allocating a new
  * page in the file would cause excessive storage usage for workloads with
@@ -966,17 +1146,15 @@ out:
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static vm_fault_t dax_load_hole(struct xa_state *xas,
-               struct address_space *mapping, void **entry,
-               struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+               const struct iomap_iter *iter, void **entry)
 {
-       struct inode *inode = mapping->host;
+       struct inode *inode = iter->inode;
        unsigned long vaddr = vmf->address;
        pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
        vm_fault_t ret;
 
-       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-                       DAX_ZERO_PAGE, false);
+       *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
 
        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
        trace_dax_load_hole(inode, vmf, ret);
@@ -985,7 +1163,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 
 #ifdef CONFIG_FS_DAX_PMD
 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
-               const struct iomap *iomap, void **entry)
+               const struct iomap_iter *iter, void **entry)
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
@@ -1003,8 +1181,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                goto fallback;
 
        pfn = page_to_pfn_t(zero_page);
-       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-                       DAX_PMD | DAX_ZERO_PAGE, false);
+       *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
+                                 DAX_PMD | DAX_ZERO_PAGE);
 
        if (arch_needs_pgtable_deposit()) {
                pgtable = pte_alloc_one(vma->vm_mm);
@@ -1037,23 +1215,34 @@ fallback:
 }
 #else
 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
-               const struct iomap *iomap, void **entry)
+               const struct iomap_iter *iter, void **entry)
 {
        return VM_FAULT_FALLBACK;
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
-static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
-               unsigned int offset, size_t size)
+static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
 {
+       const struct iomap *iomap = &iter->iomap;
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       unsigned offset = offset_in_page(pos);
+       pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
        void *kaddr;
        long ret;
 
-       ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
-       if (ret > 0) {
-               memset(kaddr + offset, 0, size);
-               dax_flush(dax_dev, kaddr + offset, size);
-       }
+       ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
+                               NULL);
+       if (ret < 0)
+               return ret;
+       memset(kaddr + offset, 0, size);
+       if (srcmap->addr != iomap->addr) {
+               ret = dax_iomap_cow_copy(pos, size, PAGE_SIZE, srcmap,
+                                        kaddr);
+               if (ret < 0)
+                       return ret;
+               dax_flush(iomap->dax_dev, kaddr, PAGE_SIZE);
+       } else
+               dax_flush(iomap->dax_dev, kaddr + offset, size);
        return ret;
 }
 
@@ -1080,7 +1269,7 @@ static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
                if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
                        rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
                else
-                       rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+                       rc = dax_memzero(iter, pos, size);
                dax_read_unlock(id);
 
                if (rc < 0)
@@ -1129,15 +1318,17 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                struct iov_iter *iter)
 {
        const struct iomap *iomap = &iomi->iomap;
+       const struct iomap *srcmap = &iomi->srcmap;
        loff_t length = iomap_length(iomi);
        loff_t pos = iomi->pos;
        struct dax_device *dax_dev = iomap->dax_dev;
        loff_t end = pos + length, done = 0;
+       bool write = iov_iter_rw(iter) == WRITE;
        ssize_t ret = 0;
        size_t xfer;
        int id;
 
-       if (iov_iter_rw(iter) == READ) {
+       if (!write) {
                end = min(end, i_size_read(iomi->inode));
                if (pos >= end)
                        return 0;
@@ -1146,7 +1337,12 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                        return iov_iter_zero(min(length, end - pos), iter);
        }
 
-       if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+       /*
+        * In DAX mode, enforce either pure overwrites of written extents, or
+        * writes to unwritten extents as part of a copy-on-write operation.
+        */
+       if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
+                       !(iomap->flags & IOMAP_F_SHARED)))
                return -EIO;
 
        /*
@@ -1188,6 +1384,14 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                        break;
                }
 
+               if (write &&
+                   srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
+                       ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
+                                                kaddr);
+                       if (ret)
+                               break;
+               }
+
                map_len = PFN_PHYS(map_len);
                kaddr += offset;
                map_len -= offset;
@@ -1197,7 +1401,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                if (recovery)
                        xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
                                        map_len, iter);
-               else if (iov_iter_rw(iter) == WRITE)
+               else if (write)
                        xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);
                else
@@ -1267,17 +1471,6 @@ static vm_fault_t dax_fault_return(int error)
        return vmf_error(error);
 }
 
-/*
- * MAP_SYNC on a dax mapping guarantees dirty metadata is
- * flushed on write-faults (non-cow), but not read-faults.
- */
-static bool dax_fault_is_synchronous(unsigned long flags,
-               struct vm_area_struct *vma, const struct iomap *iomap)
-{
-       return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
-               && (iomap->flags & IOMAP_F_DIRTY);
-}
-
 /*
  * When handling a synchronous page fault and the inode need a fsync, we can
  * insert the PTE/PMD into page tables only after that fsync happened. Skip
@@ -1335,15 +1528,15 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
                const struct iomap_iter *iter, pfn_t *pfnp,
                struct xa_state *xas, void **entry, bool pmd)
 {
-       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        const struct iomap *iomap = &iter->iomap;
+       const struct iomap *srcmap = &iter->srcmap;
        size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
        loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
-       bool write = vmf->flags & FAULT_FLAG_WRITE;
-       bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
+       bool write = iter->flags & IOMAP_WRITE;
        unsigned long entry_flags = pmd ? DAX_PMD : 0;
        int err = 0;
        pfn_t pfn;
+       void *kaddr;
 
        if (!pmd && vmf->cow_page)
                return dax_fault_cow_page(vmf, iter);
@@ -1352,23 +1545,29 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
        if (!write &&
            (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
                if (!pmd)
-                       return dax_load_hole(xas, mapping, entry, vmf);
-               return dax_pmd_load_hole(xas, vmf, iomap, entry);
+                       return dax_load_hole(xas, vmf, iter, entry);
+               return dax_pmd_load_hole(xas, vmf, iter, entry);
        }
 
-       if (iomap->type != IOMAP_MAPPED) {
+       if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
                WARN_ON_ONCE(1);
                return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
        }
 
-       err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
+       err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
        if (err)
                return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
 
-       *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
-                                 write && !sync);
+       *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
+
+       if (write &&
+           srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
+               err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
+               if (err)
+                       return dax_fault_return(err);
+       }
 
-       if (sync)
+       if (dax_fault_is_synchronous(iter, vmf->vma))
                return dax_fault_synchronous_pfnp(pfnp, pfn);
 
        /* insert PMD pfn */
@@ -1674,3 +1873,85 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
        return dax_insert_pfn_mkwrite(vmf, pfn, order);
 }
 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
+
+static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
+               struct iomap_iter *it_dest, u64 len, bool *same)
+{
+       const struct iomap *smap = &it_src->iomap;
+       const struct iomap *dmap = &it_dest->iomap;
+       loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
+       void *saddr, *daddr;
+       int id, ret;
+
+       len = min(len, min(smap->length, dmap->length));
+
+       if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
+               *same = true;
+               return len;
+       }
+
+       if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
+               *same = false;
+               return 0;
+       }
+
+       id = dax_read_lock();
+       ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
+                                     &saddr, NULL);
+       if (ret < 0)
+               goto out_unlock;
+
+       ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
+                                     &daddr, NULL);
+       if (ret < 0)
+               goto out_unlock;
+
+       *same = !memcmp(saddr, daddr, len);
+       if (!*same)
+               len = 0;
+       dax_read_unlock(id);
+       return len;
+
+out_unlock:
+       dax_read_unlock(id);
+       return -EIO;
+}
+
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+               struct inode *dst, loff_t dstoff, loff_t len, bool *same,
+               const struct iomap_ops *ops)
+{
+       struct iomap_iter src_iter = {
+               .inode          = src,
+               .pos            = srcoff,
+               .len            = len,
+               .flags          = IOMAP_DAX,
+       };
+       struct iomap_iter dst_iter = {
+               .inode          = dst,
+               .pos            = dstoff,
+               .len            = len,
+               .flags          = IOMAP_DAX,
+       };
+       int ret;
+
+       while ((ret = iomap_iter(&src_iter, ops)) > 0) {
+               while ((ret = iomap_iter(&dst_iter, ops)) > 0) {
+                       dst_iter.processed = dax_range_compare_iter(&src_iter,
+                                               &dst_iter, len, same);
+               }
+               if (ret <= 0)
+                       src_iter.processed = ret;
+       }
+       return ret;
+}
+
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+                             struct file *file_out, loff_t pos_out,
+                             loff_t *len, unsigned int remap_flags,
+                             const struct iomap_ops *ops)
+{
+       return __generic_remap_file_range_prep(file_in, pos_in, file_out,
+                                              pos_out, len, remap_flags, ops);
+}
+EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
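
/*
 * Illustrative sketch (editorial note, not part of this change): a filesystem
 * adding DAX reflink support would typically call the prep helper from its
 * ->remap_file_range() path, selecting it for DAX inodes; fs_remap_range_prep()
 * and fs_dax_iomap_ops are placeholder names for the filesystem's own code:
 *
 *	static int fs_remap_range_prep(struct file *file_in, loff_t pos_in,
 *			struct file *file_out, loff_t pos_out, loff_t *len,
 *			unsigned int remap_flags)
 *	{
 *		if (IS_DAX(file_inode(file_in)))
 *			return dax_remap_file_range_prep(file_in, pos_in,
 *					file_out, pos_out, len, remap_flags,
 *					&fs_dax_iomap_ops);
 *		return generic_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, len, remap_flags);
 *	}
 */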