mm: pass pvec directly to find_get_entries
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91ca9b1..d77605c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -386,7 +386,11 @@ static int __init hugepage_init(void)
        struct kobject *hugepage_kobj;
 
        if (!has_transparent_hugepage()) {
-               transparent_hugepage_flags = 0;
+               /*
+                * Hardware doesn't support hugepages, hence disable
+                * DAX PMD support.
+                */
+               transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
                return -EINVAL;
        }
 
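With hardware THP support absent, hugepage_init() now records that fact in TRANSPARENT_HUGEPAGE_NEVER_DAX instead of clearing transparent_hugepage_flags outright, so a DAX huge-fault path can later see that PMD mappings must not be created. A minimal sketch of how such a check could look (dax_pmd_allowed() is a hypothetical helper for illustration, not code from this tree):

        /* Sketch: refuse DAX PMD mappings when hardware lacks hugepage support. */
        static bool dax_pmd_allowed(struct vm_area_struct *vma)
        {
                if (transparent_hugepage_flags &
                    (1UL << TRANSPARENT_HUGEPAGE_NEVER_DAX))
                        return false;   /* hardware/firmware said no hugepages */
                return vma_is_dax(vma);
        }
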
@@ -636,6 +640,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                lru_cache_add_inactive_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+               update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm_inc_nr_ptes(vma->vm_mm);
                spin_unlock(vmf->ptl);
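The added update_mmu_cache_pmd() call tells architectures with software-managed MMU/TLB caches that a huge PMD was just installed at this address; on architectures whose hardware walks page tables directly the hook is a no-op and the call compiles away. A rough sketch of such a stub (illustrative only, not the definition from any particular arch header):

        static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                                                unsigned long addr, pmd_t *pmd)
        {
                /* nothing to do: hardware walks the page tables itself */
        }
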
@@ -690,20 +695,19 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 }
 
 /* Caller must hold page table lock. */
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
 {
        pmd_t entry;
        if (!pmd_none(*pmd))
-               return false;
+               return;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        if (pgtable)
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
-       return true;
 }
 
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
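set_huge_zero_page() drops its boolean return: the fault path checks pmd_none() under the page-table lock before calling it, so the result carried no extra information. A simplified sketch of that caller pattern (condensed from do_huge_pmd_anonymous_page(), error and userfaultfd handling omitted):

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (pmd_none(*vmf->pmd)) {
                set_huge_zero_page(pgtable, vma->vm_mm, vma, haddr,
                                   vmf->pmd, zero_page);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
        }
        spin_unlock(vmf->ptl);
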
@@ -749,6 +753,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        } else {
                                set_huge_zero_page(pgtable, vma->vm_mm, vma,
                                                   haddr, vmf->pmd, zero_page);
+                               update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                                spin_unlock(vmf->ptl);
                        }
                } else {
@@ -1439,7 +1444,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page);
+               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
                goto out;
        }
 
@@ -1475,7 +1480,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page);
+               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
                goto out;
        }
 
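put_and_wait_on_page_locked() now takes the task state to wait in, so these two NUMA-fault migration waits keep their old uninterruptible behaviour by passing TASK_UNINTERRUPTIBLE explicitly, while other call sites are free to choose a different state. Hedged usage sketch (the second line is purely illustrative; TASK_KILLABLE comes from <linux/sched.h>):

        put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); /* wait as before */
        put_and_wait_on_page_locked(page, TASK_KILLABLE);        /* allow fatal signals to end the wait */
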
@@ -2176,7 +2181,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                lock_page_memcg(page);
                if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
                        /* Last compound_mapcount is gone. */
-                       __dec_lruvec_page_state(page, NR_ANON_THPS);
+                       __mod_lruvec_page_state(page, NR_ANON_THPS,
+                                               -HPAGE_PMD_NR);
                        if (TestClearPageDoubleMap(page)) {
                                /* No need in mapcount reference anymore */
                                for (i = 0; i < HPAGE_PMD_NR; i++)
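NR_ANON_THPS switches from counting THPs to counting base pages: when the last compound mapping goes away, the stat drops by HPAGE_PMD_NR instead of by one. Readers of the counter can then report it without scaling; a hedged sketch of what that looks like (not the exact /proc/meminfo code):

        /* pages -> KiB; no "* HPAGE_PMD_NR" needed at the reporting site */
        unsigned long anon_huge_kb =
                global_node_page_state(NR_ANON_THPS) << (PAGE_SHIFT - 10);
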
@@ -2751,10 +2757,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                }
                spin_unlock(&ds_queue->split_queue_lock);
                if (mapping) {
+                       int nr = thp_nr_pages(head);
+
                        if (PageSwapBacked(head))
-                               __dec_lruvec_page_state(head, NR_SHMEM_THPS);
+                               __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+                                                       -nr);
                        else
-                               __dec_lruvec_page_state(head, NR_FILE_THPS);
+                               __mod_lruvec_page_state(head, NR_FILE_THPS,
+                                                       -nr);
                }
 
                __split_huge_page(page, list, end);
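The shmem and file THP counters get the same page-based treatment, subtracting thp_nr_pages(head), i.e. the number of base pages in the compound page being split. Roughly what that helper amounts to in this era of the tree (sketch; see include/linux/huge_mm.h for the real definition):

        static inline int thp_nr_pages(struct page *page)
        {
                VM_BUG_ON_PGFLAGS(PageTail(page), page);
                if (PageHead(page))
                        return HPAGE_PMD_NR;    /* a PMD-sized THP */
                return 1;                       /* plain base page */
        }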