diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d77605c..ae907a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -668,9 +668,9 @@ release:
  *         available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
 {
-       const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+       const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
 
        /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
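
Note: the helper loses its static qualifier and gets the shorter name vma_thp_gfp_mask() so it can be called from THP allocation sites outside huge_memory.c. The vma_madvised test is also made NULL-safe: the old !!(vma->vm_flags & VM_HUGEPAGE) would oops on a NULL vma, while the new form treats a missing vma as "no MADV_HUGEPAGE hint". A minimal sketch of the resulting shape, with the remaining defrag modes elided (an illustration, not the full upstream body):

	gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
	{
		/* NULL vma now simply means "not madvised" */
		const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

		/* Always do synchronous compaction */
		if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
			     &transparent_hugepage_flags))
			return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

		/* ... other TRANSPARENT_HUGEPAGE_DEFRAG_* modes elided ... */
		return GFP_TRANSHUGE_LIGHT;
	}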
@@ -762,7 +762,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                }
                return ret;
        }
-       gfp = alloc_hugepage_direct_gfpmask(vma);
+       gfp = vma_thp_gfp_mask(vma);
        page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
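
At this call site only the name changes: the anonymous fault path always has a valid vma, so the new NULL tolerance is a no-op here and the chosen gfp mask is the same as before.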
@@ -1100,9 +1100,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * best effort that the pinned pages won't be replaced by another
         * random page during the coming copy-on-write.
         */
-       if (unlikely(is_cow_mapping(vma->vm_flags) &&
-                    atomic_read(&src_mm->has_pinned) &&
-                    page_maybe_dma_pinned(src_page))) {
+       if (unlikely(page_needs_cow_for_dma(vma, src_page))) {
                pte_free(dst_mm, pgtable);
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
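
The open-coded three-part test (CoW mapping, the source mm has pinned pages, the page may be DMA-pinned) is folded into page_needs_cow_for_dma(). Reconstructing from the deleted lines, and assuming the helper derives the mm from vma->vm_mm (the src_mm previously passed alongside it), it presumably reads roughly like this sketch:

	static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
						  struct page *page)
	{
		if (!is_cow_mapping(vma->vm_flags))
			return false;
		if (!atomic_read(&vma->vm_mm->has_pinned))
			return false;
		/* best effort; may false-positive on heavily shared pages */
		return page_maybe_dma_pinned(page);
	}

Centralizing the check keeps the early-CoW policy for pinned pages in one place instead of several open-coded copies.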
@@ -1214,9 +1212,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        }
 
        /* Please refer to comments in copy_huge_pmd() */
-       if (unlikely(is_cow_mapping(vma->vm_flags) &&
-                    atomic_read(&src_mm->has_pinned) &&
-                    page_maybe_dma_pinned(pud_page(pud)))) {
+       if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) {
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
                __split_huge_pud(vma, src_pud, addr);
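
Same substitution on the PUD path; with both the PMD and PUD copy paths calling one helper, the pinned-page CoW policy cannot drift between the two huge-page sizes.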
@@ -2471,7 +2467,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        int i;
 
        /* complete memcg works before add pages to LRU */
-       mem_cgroup_split_huge_fixup(head);
+       split_page_memcg(head, nr);
 
        if (PageAnon(head) && PageSwapCache(head)) {
                swp_entry_t entry = { .val = page_private(head) };
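
mem_cgroup_split_huge_fixup(head) becomes the more general split_page_memcg(head, nr), which takes the subpage count explicitly (nr is computed earlier in __split_huge_page(), outside this hunk). A plausible sketch of what such a helper has to do, assuming each tail page simply inherits the head's memcg_data and the memcg CSS refcount is bumped to match (an illustration, not a quote of the upstream body):

	void split_page_memcg(struct page *head, unsigned int nr)
	{
		struct mem_cgroup *memcg = page_memcg(head);
		int i;

		if (mem_cgroup_disabled() || !memcg)
			return;

		/* each tail page inherits the head page's memcg binding */
		for (i = 1; i < nr; i++)
			head[i].memcg_data = head->memcg_data;

		/* take one css reference per additional page */
		css_get_many(&memcg->css, nr - 1);
	}

Taking nr as a parameter is what would let the helper serve non-THP page splits (e.g. split_page()) as well, which the old THP-specific fixup could not.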