diff --git a/mm/filemap.c b/mm/filemap.c
index aa0e0fb..6ff2a3f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -42,6 +42,8 @@
 #include <linux/psi.h>
 #include <linux/ramfs.h>
 #include <linux/page_idle.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -2915,74 +2917,163 @@ out_retry:
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct vm_fault *vmf,
-               pgoff_t start_pgoff, pgoff_t end_pgoff)
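+/*
+ * Try to handle the fault at PMD level.  Returns true when the fault has
+ * been handled here (a huge page is already mapped, do_set_pmd() succeeded,
+ * or the PMD is devmap/unstable); in that case the page has been unlocked
+ * and the caller's reference released or consumed.  Returns false when the
+ * caller should go on to map individual PTEs under this PMD.
+ */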
+static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 {
-       struct file *file = vmf->vma->vm_file;
+       struct mm_struct *mm = vmf->vma->vm_mm;
+
+       /* Huge page is mapped? No need to proceed. */
+       if (pmd_trans_huge(*vmf->pmd)) {
+               unlock_page(page);
+               put_page(page);
+               return true;
+       }
+
+       if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
+               vm_fault_t ret = do_set_pmd(vmf, page);
+               if (!ret) {
+                       /* The page is mapped successfully, reference consumed. */
+                       unlock_page(page);
+                       return true;
+               }
+       }
+
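+       /*
+        * No PMD-sized mapping was installed: populate the PMD with the
+        * preallocated page table so that PTEs can be set up below it.
+        */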
+       if (pmd_none(*vmf->pmd)) {
+               vmf->ptl = pmd_lock(mm, vmf->pmd);
+               if (likely(pmd_none(*vmf->pmd))) {
+                       mm_inc_nr_ptes(mm);
+                       pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
+                       vmf->prealloc_pte = NULL;
+               }
+               spin_unlock(vmf->ptl);
+       }
+
+       /* See comment in handle_pte_fault() */
+       if (pmd_devmap_trans_unstable(vmf->pmd)) {
+               unlock_page(page);
+               put_page(page);
+               return true;
+       }
+
+       return false;
+}
+
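+/*
+ * Return the next page in the range that is safe to map into the page
+ * tables: present in the page cache, uptodate, not poisoned, successfully
+ * locked, and with an elevated refcount.  Returns NULL when no such page
+ * remains before end_pgoff.
+ */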
+static struct page *next_uptodate_page(struct page *page,
+                                      struct address_space *mapping,
+                                      struct xa_state *xas, pgoff_t end_pgoff)
+{
+       unsigned long max_idx;
+
+       do {
+               if (!page)
+                       return NULL;
+               if (xas_retry(xas, page))
+                       continue;
+               if (xa_is_value(page))
+                       continue;
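+               /*
+                * Check for a locked page first, as a speculative
+                * reference may adversely influence page migration.
+                */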
+               if (PageLocked(page))
+                       continue;
+               if (!page_cache_get_speculative(page))
+                       continue;
+               /* Has the page moved or been split? */
+               if (unlikely(page != xas_reload(xas)))
+                       goto skip;
+               if (!PageUptodate(page) || PageReadahead(page))
+                       goto skip;
+               if (PageHWPoison(page))
+                       goto skip;
+               if (!trylock_page(page))
+                       goto skip;
+               if (page->mapping != mapping)
+                       goto unlock;
+               if (!PageUptodate(page))
+                       goto unlock;
+               max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+               if (xas->xa_index >= max_idx)
+                       goto unlock;
+               return page;
+unlock:
+               unlock_page(page);
+skip:
+               put_page(page);
+       } while ((page = xas_next_entry(xas, end_pgoff)) != NULL);
+
+       return NULL;
+}
+
+static inline struct page *first_map_page(struct address_space *mapping,
+                                         struct xa_state *xas,
+                                         pgoff_t end_pgoff)
+{
+       return next_uptodate_page(xas_find(xas, end_pgoff),
+                                 mapping, xas, end_pgoff);
+}
+
+static inline struct page *next_map_page(struct address_space *mapping,
+                                        struct xa_state *xas,
+                                        pgoff_t end_pgoff)
+{
+       return next_uptodate_page(xas_next_entry(xas, end_pgoff),
+                                 mapping, xas, end_pgoff);
+}
+
+vm_fault_t filemap_map_pages(struct vm_fault *vmf,
+                            pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        pgoff_t last_pgoff = start_pgoff;
-       unsigned long max_idx;
+       unsigned long addr;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        struct page *head, *page;
        unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+       vm_fault_t ret = 0;
 
        rcu_read_lock();
-       xas_for_each(&xas, head, end_pgoff) {
-               if (xas_retry(&xas, head))
-                       continue;
-               if (xa_is_value(head))
-                       goto next;
+       head = first_map_page(mapping, &xas, end_pgoff);
+       if (!head)
+               goto out;
 
-               /*
-                * Check for a locked page first, as a speculative
-                * reference may adversely influence page migration.
-                */
-               if (PageLocked(head))
-                       goto next;
-               if (!page_cache_get_speculative(head))
-                       goto next;
+       if (filemap_map_pmd(vmf, head)) {
+               ret = VM_FAULT_NOPAGE;
+               goto out;
+       }
 
-               /* Has the page moved or been split? */
-               if (unlikely(head != xas_reload(&xas)))
-                       goto skip;
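+       /*
+        * Translate the first file offset into the address it is mapped at
+        * and take the PTE lock once for the whole range.
+        */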
+       addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+       vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
+       do {
                page = find_subpage(head, xas.xa_index);
-
-               if (!PageUptodate(head) ||
-                               PageReadahead(page) ||
-                               PageHWPoison(page))
-                       goto skip;
-               if (!trylock_page(head))
-                       goto skip;
-
-               if (head->mapping != mapping || !PageUptodate(head))
-                       goto unlock;
-
-               max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
-               if (xas.xa_index >= max_idx)
+               if (PageHWPoison(page))
                        goto unlock;
 
                if (mmap_miss > 0)
                        mmap_miss--;
 
-               vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
-               if (vmf->pte)
-                       vmf->pte += xas.xa_index - last_pgoff;
+               addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
+               vmf->pte += xas.xa_index - last_pgoff;
                last_pgoff = xas.xa_index;
-               if (alloc_set_pte(vmf, page))
+
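+               /* A PTE is already installed here, leave it in place. */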
+               if (!pte_none(*vmf->pte))
                        goto unlock;
+
+               /* We're about to handle the fault */
+               if (vmf->address == addr)
+                       ret = VM_FAULT_NOPAGE;
+
+               do_set_pte(vmf, page, addr);
+               /* no need to invalidate: a not-present page won't be cached */
+               update_mmu_cache(vma, addr, vmf->pte);
                unlock_page(head);
-               goto next;
+               continue;
 unlock:
                unlock_page(head);
-skip:
                put_page(head);
-next:
-               /* Huge page is mapped? No need to proceed. */
-               if (pmd_trans_huge(*vmf->pmd))
-                       break;
-       }
+       } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+       pte_unmap_unlock(vmf->pte, vmf->ptl);
+out:
        rcu_read_unlock();
        WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+       return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);