[linux-2.6-microblaze.git] mm/huge_memory.c
index f655137..ae907a9 100644
@@ -386,7 +386,11 @@ static int __init hugepage_init(void)
        struct kobject *hugepage_kobj;
 
        if (!has_transparent_hugepage()) {
-               transparent_hugepage_flags = 0;
+               /*
+                * Hardware doesn't support hugepages, hence disable
+                * DAX PMD support.
+                */
+               transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
                return -EINVAL;
        }
 
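Note: clearing transparent_hugepage_flags to 0 disabled anonymous THP, but a
DAX VMA could still get a PMD entry installed later. Setting the
TRANSPARENT_HUGEPAGE_NEVER_DAX bit instead gives the enabled-check something
to refuse on. A minimal sketch of the consuming test, modelled on
__transparent_hugepage_enabled(); the reduced helper name here is
illustrative, not part of the patch:

    /* Sketch: refuse THP when firmware/hardware marked it unsupported. */
    static inline bool thp_allowed_for(struct vm_area_struct *vma)
    {
            if (transparent_hugepage_flags &
                (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
                    return false;
            /* ... remaining sysfs/VMA checks elided ... */
            return true;
    }
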
@@ -636,6 +640,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                lru_cache_add_inactive_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+               update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm_inc_nr_ptes(vma->vm_mm);
                spin_unlock(vmf->ptl);
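Note: set_pmd_at() alone is not enough on architectures with software-managed
MMU caches (MIPS being the usual example); update_mmu_cache_pmd() primes the
TLB for the freshly installed huge entry. On most ports the hook is a no-op
and compiles away, so the call is free where it is not needed. The
huge-zero-page path below gets the same treatment. A representative per-arch
fallback (the exact definition varies by architecture):

    /* Typical no-op fallback where hardware walks page tables itself. */
    #define update_mmu_cache_pmd(vma, addr, pmd) do { } while (0)
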
@@ -663,9 +668,9 @@ release:
  *         available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
 {
-       const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+       const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
 
        /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
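Note: dropping static and renaming to vma_thp_gfp_mask() exposes the gfp
policy to other THP allocation sites, and the new vma && guard makes a NULL
VMA legal, where the old !!(vma->vm_flags & VM_HUGEPAGE) form would have
dereferenced a NULL pointer. A hedged caller sketch (this call site is
illustrative, not one added by the patch):

    /* Sketch: no VMA at hand, so the allocation counts as not madvised. */
    gfp_t gfp = vma_thp_gfp_mask(NULL);
    struct page *page = alloc_pages(gfp | __GFP_COMP, HPAGE_PMD_ORDER);
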
@@ -690,20 +695,19 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 }
 
 /* Caller must hold page table lock. */
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
 {
        pmd_t entry;
        if (!pmd_none(*pmd))
-               return false;
+               return;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        if (pgtable)
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
-       return true;
 }
 
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
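Note: the caller holds the page table lock across the pmd_none() test and the
install, and (as the next hunk shows) it no longer looks at the result, so the
bool return carried no information; returning void simplifies the contract.
The resulting caller pattern, for reference (mirrors the hunk below):

    /* Sketch: install the huge zero page under ptl, then prime the MMU. */
    spin_lock(vmf->ptl);
    set_huge_zero_page(pgtable, mm, vma, haddr, vmf->pmd, zero_page);
    update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
    spin_unlock(vmf->ptl);
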
@@ -749,6 +753,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        } else {
                                set_huge_zero_page(pgtable, vma->vm_mm, vma,
                                                   haddr, vmf->pmd, zero_page);
+                               update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                                spin_unlock(vmf->ptl);
                        }
                } else {
@@ -757,7 +762,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                }
                return ret;
        }
-       gfp = alloc_hugepage_direct_gfpmask(vma);
+       gfp = vma_thp_gfp_mask(vma);
        page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
@@ -1095,9 +1100,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * best effort that the pinned pages won't be replaced by another
         * random page during the coming copy-on-write.
         */
-       if (unlikely(is_cow_mapping(vma->vm_flags) &&
-                    atomic_read(&src_mm->has_pinned) &&
-                    page_maybe_dma_pinned(src_page))) {
+       if (unlikely(page_needs_cow_for_dma(vma, src_page))) {
                pte_free(dst_mm, pgtable);
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
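Note: page_needs_cow_for_dma() folds the three-part test (CoW mapping, the mm
has seen pinning, the page looks DMA-pinned) into one helper so this PMD path,
the PUD path below, and the PTE copy path all apply the same rule. A sketch
consistent with the open-coded check removed above (the real definition lives
elsewhere in the tree; treat this body as illustrative):

    static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
                                              struct page *page)
    {
            if (!is_cow_mapping(vma->vm_flags))
                    return false;
            if (!atomic_read(&vma->vm_mm->has_pinned))
                    return false;
            return page_maybe_dma_pinned(page);
    }
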
@@ -1209,9 +1212,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        }
 
        /* Please refer to comments in copy_huge_pmd() */
-       if (unlikely(is_cow_mapping(vma->vm_flags) &&
-                    atomic_read(&src_mm->has_pinned) &&
-                    page_maybe_dma_pinned(pud_page(pud)))) {
+       if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) {
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
                __split_huge_pud(vma, src_pud, addr);
@@ -2176,7 +2177,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                lock_page_memcg(page);
                if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
                        /* Last compound_mapcount is gone. */
-                       __dec_lruvec_page_state(page, NR_ANON_THPS);
+                       __mod_lruvec_page_state(page, NR_ANON_THPS,
+                                               -HPAGE_PMD_NR);
                        if (TestClearPageDoubleMap(page)) {
                                /* No need in mapcount reference anymore */
                                for (i = 0; i < HPAGE_PMD_NR; i++)
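Note: NR_ANON_THPS is converted from huge-page units to base-page units, so
dropping the last compound mapcount must subtract HPAGE_PMD_NR rather than 1.
The increment side has to move in the same unit; a sketch of the matching
mapping-side update (call site illustrative):

    /* Sketch: a THP mapped by a PMD now bumps the counter by the
     * number of base pages it covers, not by one. */
    __mod_lruvec_page_state(page, NR_ANON_THPS, HPAGE_PMD_NR);
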
@@ -2465,7 +2467,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        int i;
 
        /* complete memcg works before add pages to LRU */
-       mem_cgroup_split_huge_fixup(head);
+       split_page_memcg(head, nr);
 
        if (PageAnon(head) && PageSwapCache(head)) {
                swp_entry_t entry = { .val = page_private(head) };
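Note: split_page_memcg() generalises mem_cgroup_split_huge_fixup(): the tail
count is passed in, so it is not tied to HPAGE_PMD_NR, and it is a no-op when
memcg is disabled. A sketch of what the helper has to do, assuming the
page->memcg_data binding (body illustrative):

    /* Sketch: copy the head's memcg binding to every tail page and
     * take matching css references so each subpage stays charged. */
    void split_page_memcg(struct page *head, unsigned int nr)
    {
            struct mem_cgroup *memcg = page_memcg(head);
            int i;

            if (mem_cgroup_disabled() || !memcg)
                    return;

            for (i = 1; i < nr; i++)
                    head[i].memcg_data = head->memcg_data;
            css_get_many(&memcg->css, nr - 1);
    }
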
@@ -2751,10 +2753,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                }
                spin_unlock(&ds_queue->split_queue_lock);
                if (mapping) {
+                       int nr = thp_nr_pages(head);
+
                        if (PageSwapBacked(head))
-                               __dec_lruvec_page_state(head, NR_SHMEM_THPS);
+                               __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+                                                       -nr);
                        else
-                               __dec_lruvec_page_state(head, NR_FILE_THPS);
+                               __mod_lruvec_page_state(head, NR_FILE_THPS,
+                                                       -nr);
                }
 
                __split_huge_page(page, list, end);
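
Note: the same base-page conversion as NR_ANON_THPS above, applied to
NR_SHMEM_THPS and NR_FILE_THPS, with the amount taken from thp_nr_pages(head)
so the split path is not hard-wired to PMD-sized pages. Whatever path charges
these counters must use the same unit; a sketch of the matching increment on
the collapse side (call site and the is_shmem flag are illustrative):

    /* Sketch: charge in base pages, symmetric with the uncharge above. */
    if (is_shmem)
            __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, HPAGE_PMD_NR);
    else
            __mod_lruvec_page_state(new_page, NR_FILE_THPS, HPAGE_PMD_NR);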