Merge tag 'libnvdimm-for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdim...
diff --git a/fs/dax.c b/fs/dax.c
index 27ba300..9598159 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -565,7 +565,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
                ret = __radix_tree_lookup(page_tree, index, &node, &slot);
                WARN_ON_ONCE(ret != entry);
                __radix_tree_replace(page_tree, node, slot,
-                                    new_entry, NULL, NULL);
+                                    new_entry, NULL);
                entry = new_entry;
        }
 
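This hunk tracks a radix-tree API change that landed in the same window: the
trailing `private' argument, which was only forwarded to the update_node
callback, was dropped from __radix_tree_replace(). As best I recall, the
prototype after this change reads roughly as below (a sketch from memory;
check lib/radix-tree.c in the tree you actually build against):

        void __radix_tree_replace(struct radix_tree_root *root,
                                  struct radix_tree_node *node,
                                  void __rcu **slot, void *item,
                                  radix_tree_update_node_t update_node);

so the DAX call site keeps passing NULL for the callback and simply drops the
trailing private argument.
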
@@ -614,6 +614,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
                        continue;
 
+               /*
+                * No need to call mmu_notifier_invalidate_range() as we are
+                * downgrading page table protection, not changing it to point
+                * to a new page.
+                *
+                * See Documentation/vm/mmu_notifier.txt
+                */
                if (pmdp) {
 #ifdef CONFIG_FS_DAX_PMD
                        pmd_t pmd;
@@ -628,7 +635,6 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                        pmd = pmd_wrprotect(pmd);
                        pmd = pmd_mkclean(pmd);
                        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-                       mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pmd:
                        spin_unlock(ptl);
 #endif
@@ -643,7 +649,6 @@ unlock_pmd:
                        pte = pte_wrprotect(pte);
                        pte = pte_mkclean(pte);
                        set_pte_at(vma->vm_mm, address, ptep, pte);
-                       mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }
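
The two mmu_notifier_invalidate_range() deletions above follow from the new
comment: write-protecting and cleaning an entry only narrows permissions, so
a secondary MMU that still maps the page read-only stays coherent and will
simply re-fault on its next write. The notifier call is only mandatory when
an entry is changed to point at a different page. A minimal, hypothetical
sketch of that contrasting case (invented helper name; real code would also
hold the page table lock and bracket the update with
mmu_notifier_invalidate_range_start()/end()):

        /* Hypothetical: swap the page behind a PTE, CoW-style. */
        static void example_replace_page(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep,
                                         struct page *new_page, pgprot_t prot)
        {
                flush_cache_page(vma, addr, pte_pfn(*ptep));
                ptep_clear_flush(vma, addr, ptep);
                set_pte_at(vma->vm_mm, addr, ptep, mk_pte(new_page, prot));
                /*
                 * Unlike the wrprotect+mkclean sequence above, this must
                 * notify: a secondary MMU still using the old page would
                 * see stale data.
                 */
                mmu_notifier_invalidate_range(vma->vm_mm, addr,
                                              addr + PAGE_SIZE);
        }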
@@ -789,7 +794,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 
        tag_pages_for_writeback(mapping, start_index, end_index);
 
-       pagevec_init(&pvec, 0);
+       pagevec_init(&pvec);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
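
Same story here: pagevec_init() lost its second argument when the hot/cold
page-free distinction was removed from the mm core, so initialisation is now
the one-argument form. From memory the replacement inline is approximately
the below (hedged; contemporary trees may also reset a per-cpu drain flag,
see include/linux/pagevec.h):

        static inline void pagevec_init(struct pagevec *pvec)
        {
                pvec->nr = 0;
        }

The DAX writeback loop is otherwise unchanged.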
@@ -822,7 +827,7 @@ EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
-       return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
+       return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
 }
 
 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
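
dax_iomap_sector() changes because struct iomap replaced `blkno' (a 512-byte
sector number) with `addr' (a byte offset into the backing device); the shift
moves outside the parentheses because addr is in bytes where blkno was in
sectors. A worked example, assuming 4K pages and the hypothetical values
below:

        iomap->addr   = 0x100000   /* extent begins 1 MiB into the device */
        iomap->offset = 0x200000   /* extent maps file offset 2 MiB       */
        pos           = 0x201200   /* fault inside the extent's 2nd page  */

        pos & PAGE_MASK                  = 0x201000
        addr + 0x201000 - iomap->offset  = 0x101000 (device byte address)
        0x101000 >> 9                    = 2056     (512-byte sector)
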
@@ -1333,7 +1338,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
         * this is a reliable test.
         */
        pgoff = linear_page_index(vma, pmd_addr);
-       max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+       max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
        trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
@@ -1357,13 +1362,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
        if ((pmd_addr + PMD_SIZE) > vma->vm_end)
                goto fallback;
 
-       if (pgoff > max_pgoff) {
+       if (pgoff >= max_pgoff) {
                result = VM_FAULT_SIGBUS;
                goto out;
        }
 
        /* If the PMD would extend beyond the file size */
-       if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
+       if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
                goto fallback;
 
        /*
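The last two hunks are one logical fix: max_pgoff changes meaning from "index
of the last page in the file" to "number of pages in the file" (one past the
last valid index), so both comparisons tighten from > to >=. The case that
motivates it is a zero-length file; a worked example with 4K pages:

        i_size = 0:
                old: max_pgoff = (0 - 1) >> PAGE_SHIFT, which stored in an
                     unsigned long becomes a huge value, so pgoff > max_pgoff
                     never fires and the SIGBUS is missed
                new: max_pgoff = DIV_ROUND_UP(0, 4096) = 0, so any pgoff
                     satisfies pgoff >= 0 and takes the VM_FAULT_SIGBUS path

        i_size = 4096 (exactly one page):
                old: max_pgoff = 0 (last valid index)
                new: max_pgoff = 1 (page count); pgoff 0 is still served,
                     pgoff >= 1 faults as before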