diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e84a10b..faf357e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,16 @@ static struct shrinker deferred_split_shrinker;
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 
+bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+{
+       if (vma_is_anonymous(vma))
+               return __transparent_hugepage_enabled(vma);
+       if (vma_is_shmem(vma) && shmem_huge_enabled(vma))
+               return __transparent_hugepage_enabled(vma);
+
+       return false;
+}
+
 static struct page *get_huge_zero_page(void)
 {
        struct page *zero_page;
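The transparent_hugepage_enabled() helper added above widens the eligibility check beyond anonymous VMAs: shmem-backed mappings now qualify too, while the inline __transparent_hugepage_enabled() keeps the old anonymous-only fast-path meaning (the do_huge_pmd_wp_page() hunk further down switches to the double-underscore variant for exactly that reason). A minimal caller sketch, assuming a seq_file-based consumer along the lines of the smaps THP-eligibility report; show_thp_eligible() is a hypothetical name:

        static void show_thp_eligible(struct seq_file *m,
                                      struct vm_area_struct *vma)
        {
                /* 1 if this VMA (anon or shmem) may be backed by THP */
                seq_printf(m, "THPeligible:    %d\n",
                           transparent_hugepage_enabled(vma));
        }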
@@ -420,7 +430,7 @@ static int __init hugepage_init(void)
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save.  The admin can still enable it through /sys.
         */
-       if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
+       if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
                transparent_hugepage_flags = 0;
                return 0;
        }
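totalram_pages is no longer read as a bare unsigned long; callers go through an accessor. The threshold itself is unchanged: 512 << (20 - PAGE_SHIFT) is the page count of 512 MB, below which THP defaults to off. A sketch of the accessor shape this call assumes (an atomic_long_t-backed counter, per include/linux/mm.h):

        extern atomic_long_t _totalram_pages;

        static inline unsigned long totalram_pages(void)
        {
                /* a single atomic load; updates go through set/add helpers */
                return (unsigned long)atomic_long_read(&_totalram_pages);
        }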
@@ -558,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                return VM_FAULT_FALLBACK;
        }
 
-       pgtable = pte_alloc_one(vma->vm_mm, haddr);
+       pgtable = pte_alloc_one(vma->vm_mm);
        if (unlikely(!pgtable)) {
                ret = VM_FAULT_OOM;
                goto release;
@@ -692,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                struct page *zero_page;
                bool set;
                vm_fault_t ret;
-               pgtable = pte_alloc_one(vma->vm_mm, haddr);
+               pgtable = pte_alloc_one(vma->vm_mm);
                if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
                zero_page = mm_get_huge_zero_page(vma->vm_mm);
@@ -781,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                return VM_FAULT_SIGBUS;
 
        if (arch_needs_pgtable_deposit()) {
-               pgtable = pte_alloc_one(vma->vm_mm, addr);
+               pgtable = pte_alloc_one(vma->vm_mm);
                if (!pgtable)
                        return VM_FAULT_OOM;
        }
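These pte_alloc_one() call sites (and the copy_huge_pmd() one just below) are mechanical fallout from dropping the function's unused address argument: the page table deposited behind the huge PMD is allocated from the mm alone. Roughly the prototype change being tracked, as a sketch; the real declarations live in the per-arch pgalloc headers:

        /* before: pgtable_t pte_alloc_one(struct mm_struct *mm,
         *                                 unsigned long address); */
        pgtable_t pte_alloc_one(struct mm_struct *mm);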
@@ -917,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (!vma_is_anonymous(vma))
                return 0;
 
-       pgtable = pte_alloc_one(dst_mm, addr);
+       pgtable = pte_alloc_one(dst_mm);
        if (unlikely(!pgtable))
                goto out;
 
@@ -1134,8 +1144,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
        int i;
        vm_fault_t ret = 0;
        struct page **pages;
-       unsigned long mmun_start;       /* For mmu_notifiers */
-       unsigned long mmun_end;         /* For mmu_notifiers */
+       struct mmu_notifier_range range;
 
        pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
                              GFP_KERNEL);
@@ -1173,9 +1182,9 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
                cond_resched();
        }
 
-       mmun_start = haddr;
-       mmun_end   = haddr + HPAGE_PMD_SIZE;
-       mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+       mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+                               haddr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
@@ -1220,8 +1229,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
         * No need to double call mmu_notifier->invalidate_range() callback as
         * the above pmdp_huge_clear_flush_notify() did already call it.
         */
-       mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
-                                               mmun_end);
+       mmu_notifier_invalidate_range_only_end(&range);
 
        ret |= VM_FAULT_WRITE;
        put_page(page);
@@ -1231,7 +1239,7 @@ out:
 
 out_free_pages:
        spin_unlock(vmf->ptl);
-       mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+       mmu_notifier_invalidate_range_end(&range);
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
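Here and in the remaining hunks, the mmun_start/mmun_end locals collapse into a struct mmu_notifier_range, initialized once and passed by pointer. The recurring shape, assuming this tree's four-argument mmu_notifier_range_init():

        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /* ... modify page tables under the page-table lock ... */
        /*
         * _only_end() skips the extra ->invalidate_range() call when a
         * *_clear_flush_notify() already issued it; the error path above,
         * which performed no notifying flush, keeps the plain _range_end().
         */
        mmu_notifier_invalidate_range_only_end(&range);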
@@ -1248,8 +1256,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
        struct page *page = NULL, *new_page;
        struct mem_cgroup *memcg;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-       unsigned long mmun_start;       /* For mmu_notifiers */
-       unsigned long mmun_end;         /* For mmu_notifiers */
+       struct mmu_notifier_range range;
        gfp_t huge_gfp;                 /* for allocation and charge */
        vm_fault_t ret = 0;
 
@@ -1293,7 +1300,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
        get_page(page);
        spin_unlock(vmf->ptl);
 alloc:
-       if (transparent_hugepage_enabled(vma) &&
+       if (__transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow()) {
                huge_gfp = alloc_hugepage_direct_gfpmask(vma);
                new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
@@ -1338,9 +1345,9 @@ alloc:
                                    vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
-       mmun_start = haddr;
-       mmun_end   = haddr + HPAGE_PMD_SIZE;
-       mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+       mmu_notifier_range_init(&range, vma->vm_mm, haddr,
+                               haddr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
 
        spin_lock(vmf->ptl);
        if (page)
@@ -1375,8 +1382,7 @@ out_mn:
         * No need to double call mmu_notifier->invalidate_range() callback as
         * the above pmdp_huge_clear_flush_notify() did already call it.
         */
-       mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
-                                              mmun_end);
+       mmu_notifier_invalidate_range_only_end(&range);
 out:
        return ret;
 out_unlock:
@@ -1490,8 +1496,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               wait_on_page_locked(page);
-               put_page(page);
+               put_and_wait_on_page_locked(page);
                goto out;
        }
 
@@ -1527,8 +1532,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               wait_on_page_locked(page);
-               put_page(page);
+               put_and_wait_on_page_locked(page);
                goto out;
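This and the earlier do_huge_pmd_numa_page() hunk swap the wait-then-put sequence for put_and_wait_on_page_locked(). The point of the combined helper is that the reference taken for the wait no longer pins the page: it is dropped as part of queueing the waiter, so a concurrent migration can complete while we sleep instead of being held up by the elevated refcount. Usage is a drop-in replacement, as above:

        if (!get_page_unless_zero(page))
                goto out_unlock;
        spin_unlock(vmf->ptl);
        /* our reference is dropped before sleeping on the page lock */
        put_and_wait_on_page_locked(page);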
        }
 
@@ -2017,14 +2021,15 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address)
 {
        spinlock_t *ptl;
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long haddr = address & HPAGE_PUD_MASK;
+       struct mmu_notifier_range range;
 
-       mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
-       ptl = pud_lock(mm, pud);
+       mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PUD_MASK,
+                               (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
+       ptl = pud_lock(vma->vm_mm, pud);
        if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
                goto out;
-       __split_huge_pud_locked(vma, pud, haddr);
+       __split_huge_pud_locked(vma, pud, range.start);
 
 out:
        spin_unlock(ptl);
@@ -2032,8 +2037,7 @@ out:
         * No need to double call mmu_notifier->invalidate_range() callback as
         * the above pudp_huge_clear_flush_notify() did already call it.
         */
-       mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
-                                              HPAGE_PUD_SIZE);
+       mmu_notifier_invalidate_range_only_end(&range);
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
@@ -2235,11 +2239,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page)
 {
        spinlock_t *ptl;
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long haddr = address & HPAGE_PMD_MASK;
+       struct mmu_notifier_range range;
 
-       mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
-       ptl = pmd_lock(mm, pmd);
+       mmu_notifier_range_init(&range, vma->vm_mm, address & HPAGE_PMD_MASK,
+                               (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
+       ptl = pmd_lock(vma->vm_mm, pmd);
 
        /*
         * If caller asks to setup a migration entries, we need a page to check
@@ -2255,7 +2260,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        clear_page_mlock(page);
        } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
                goto out;
-       __split_huge_pmd_locked(vma, pmd, haddr, freeze);
+       __split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
        spin_unlock(ptl);
        /*
@@ -2271,8 +2276,7 @@ out:
         *     any further changes to individual pte will notify. So no need
         *     to call mmu_notifier->invalidate_range()
         */
-       mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
-                                              HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_only_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
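In both split paths the aligned-address local is gone; range.start carries exactly what haddr used to, so the locked helpers read it straight from the range:

        /*
         * range.start == address & HPAGE_PMD_MASK   (PMD path)
         * range.start == address & HPAGE_PUD_MASK   (PUD path)
         * hence __split_huge_p{m,u}d_locked(vma, ..., range.start, ...)
         * instead of a separately computed haddr.
         */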