diff --git a/mm/memory.c b/mm/memory.c
index f3ffab9..3dd6b2e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
+               } else if (details && details->single_page &&
+                          PageTransCompound(details->single_page) &&
+                          next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+                       spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+                       /*
+                        * Take and drop THP pmd lock so that we cannot return
+                        * prematurely, while zap_huge_pmd() has cleared *pmd,
+                        * but not yet decremented compound_mapcount().
+                        */
+                       spin_unlock(ptl);
                }
+
                /*
                 * Here there can be other concurrent MADV_DONTNEED or
                 * trans huge page faults running, and if the pmd is
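The new "else if" branch above closes a race with a concurrent zap_huge_pmd() (for example from MADV_DONTNEED): the zapper may already have cleared *pmd while still holding the pmd lock, before it has decremented compound_mapcount(). Taking and immediately dropping the pmd lock therefore makes the single-page unmap path added later in this diff (unmap_mapping_page()) wait until any such critical section has finished. Below is a minimal user-space sketch of that take-and-drop locking pattern; it is illustrative only, not kernel code, and all names in it are made up:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int entry_cleared;	/* stands in for pmd_none(*pmd)           */
	static int bookkeeping_done;	/* stands in for the mapcount bookkeeping */

	/* Zapper: clears the entry and finishes bookkeeping under the lock. */
	static void *zapper(void *arg)
	{
		pthread_mutex_lock(&lock);
		entry_cleared = 1;
		/* ... other work while still holding the lock ... */
		bookkeeping_done = 1;
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	/*
	 * Waiter: on seeing the entry already cleared, take and drop the lock
	 * so it cannot proceed while the zapper is mid-critical-section.
	 * (The unlocked read of entry_cleared is a simplification of the
	 * kernel's pmd_none() check.)
	 */
	static void waiter(void)
	{
		if (entry_cleared) {
			pthread_mutex_lock(&lock);
			pthread_mutex_unlock(&lock);
			/* If the zapper set entry_cleared, bookkeeping_done
			 * is guaranteed to be 1 by now. */
		}
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, zapper, NULL);
		waiter();
		pthread_join(t, NULL);
		return 0;
	}
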
@@ -3012,6 +3023,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                                munlock_vma_page(old_page);
                        unlock_page(old_page);
                }
+               if (page_copied)
+                       free_swap_cache(old_page);
                put_page(old_page);
        }
        return page_copied ? VM_FAULT_WRITE : 0;
@@ -3036,7 +3049,7 @@ oom:
  * The function expects the page to be locked or other protection against
  * concurrent faults / writeback (such as DAX radix tree locks).
  *
- * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
+ * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
  * we acquired PTE lock.
  */
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
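Given the corrected return contract (0 on success, VM_FAULT_NOPAGE when the PTE changed before the PTE lock was taken), a caller distinguishes the two cases along these lines. This is a simplified fragment showing the calling convention only, not the kernel's actual wp_pfn_shared()/wp_page_shared() code:

	vm_fault_t tmp;

	tmp = finish_mkwrite_fault(vmf);
	if (tmp & VM_FAULT_NOPAGE) {
		/* The PTE was changed by a racing fault before the PTE lock
		 * was acquired; there is nothing left to do here. */
		return tmp;
	}
	/* tmp == 0: the PTE has been made writable, continue with the rest
	 * of the shared write-fault handling. */
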
@@ -3236,6 +3249,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
        }
 }
 
+/**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmaped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_page()
+ * to unmap it finally.
+ */
+void unmap_mapping_page(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       struct zap_details details = { };
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(PageTail(page));
+
+       details.check_mapping = mapping;
+       details.first_index = page->index;
+       details.last_index = page->index + thp_nr_pages(page) - 1;
+       details.single_page = page;
+
+       i_mmap_lock_write(mapping);
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+               unmap_mapping_range_tree(&mapping->i_mmap, &details);
+       i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_pages() - Unmap pages from processes.
  * @mapping: The address space containing pages to be unmapped.
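The kernel-doc above describes the expected caller: truncation or invalidation code that already holds the page lock and finds the page mapped again. The companion change in mm/truncate.c (not part of this diff) uses the new helper roughly as in the following sketch:

	/* With the page locked, in truncate_cleanup_page()-style code: */
	if (page_mapped(page))
		unmap_mapping_page(page);
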
@@ -3312,6 +3355,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page = NULL, *swapcache;
+       struct swap_info_struct *si = NULL;
        swp_entry_t entry;
        pte_t pte;
        int locked;
@@ -3339,14 +3383,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                goto out;
        }
 
+       /* Prevent swapoff from happening to us. */
+       si = get_swap_device(entry);
+       if (unlikely(!si))
+               goto out;
 
        delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry, vma, vmf->address);
        swapcache = page;
 
        if (!page) {
-               struct swap_info_struct *si = swp_swap_info(entry);
-
                if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
                    __swap_count(entry) == 1) {
                        /* skip swapcache */
@@ -3515,6 +3561,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
+       if (si)
+               put_swap_device(si);
        return ret;
 out_nomap:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -3526,6 +3574,8 @@ out_release:
                unlock_page(swapcache);
                put_page(swapcache);
        }
+       if (si)
+               put_swap_device(si);
        return ret;
 }
 
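Taken together, the do_swap_page() hunks above introduce one pattern: pin the swap device with get_swap_device() before the swap entry and swap cache are used, and drop the reference with put_swap_device() on every exit path, so that a concurrent swapoff cannot free the swap metadata underneath the fault handler. In outline (an illustrative fragment, not a literal copy of the function):

	vm_fault_t ret = 0;
	struct swap_info_struct *si = NULL;	/* NULL so early exits can skip the put */

	si = get_swap_device(entry);
	if (unlikely(!si))
		goto out;			/* raced with swapoff */

	/* ... look up the swap cache, read the page in, install the PTE ... */
	out:
	if (si)
		put_swap_device(si);
	return ret;
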
@@ -4944,8 +4994,8 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.
                         */
-                       vma = find_vma(mm, addr);
-                       if (!vma || vma->vm_start > addr)
+                       vma = vma_lookup(mm, addr);
+                       if (!vma)
                                break;
                        if (vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
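
vma_lookup(), new in this development cycle, returns the VMA that actually contains addr (or NULL), so the explicit vma->vm_start > addr check becomes unnecessary and the intent is clearer. A sketch of the equivalence follows; the helper name below is made up for illustration and is not the kernel's implementation verbatim:

	/* Roughly what the vma_lookup(mm, addr) call replaces: */
	static inline struct vm_area_struct *vma_lookup_equiv(struct mm_struct *mm,
							       unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* find_vma() returns the first VMA with vm_end > addr, which may
		 * begin above addr; only accept a VMA that really contains it. */
		if (vma && addr < vma->vm_start)
			vma = NULL;

		return vma;
	}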