[linux-2.6-microblaze.git] / mm / huge_memory.c
index e866988..78c84be 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -522,7 +522,7 @@ void prep_transhuge_page(struct page *page)
 bool is_transparent_hugepage(struct page *page)
 {
        if (!PageCompound(page))
-               return 0;
+               return false;
 
        page = compound_head(page);
        return is_huge_zero_page(page) ||
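
The hunk above is purely a type-correctness fix: is_transparent_hugepage() is declared bool, so returning false states directly what the literal 0 only implied through implicit conversion. A minimal standalone illustration of the idiom (hypothetical names, not kernel code):

    #include <stdbool.h>

    /* Returning 'false' from a bool predicate documents intent;
     * 'return 0' compiles identically but reads as an int result. */
    static bool flag_is_set(unsigned long flags, unsigned long bit)
    {
            if (!(flags & bit))
                    return false;   /* previously the shape was 'return 0' */
            return true;
    }
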
@@ -1647,8 +1647,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         * pgtable_trans_huge_withdraw after finishing pmdp related
         * operations.
         */
-       orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-                       tlb->fullmm);
+       orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
+                                               tlb->fullmm);
        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        if (vma_is_special_huge(vma)) {
                if (arch_needs_pgtable_deposit())
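
Note the API change in the hunk above: pmdp_huge_get_and_clear_full() now receives the vm_area_struct rather than the bare mm_struct (the mm is still reachable as vma->vm_mm), presumably so architecture implementations can consult VMA state. Callers such as zap_huge_pmd() simply pass vma and let the helper derive the mm. A sketch of what the generic fallback plausibly looks like after this change (assuming the usual asm-generic pattern; this fallback is not shown in the diff):

    static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                                     unsigned long address,
                                                     pmd_t *pmdp, int full)
    {
            /* Default implementation ignores 'full' and reaches the mm
             * through the VMA for the non-full helper. */
            return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
    }
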
@@ -1746,7 +1746,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * We don't have to worry about the ordering of src and dst
-        * ptlocks because exclusive mmap_sem prevents deadlock.
+        * ptlocks because exclusive mmap_lock prevents deadlock.
         */
        old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
        if (old_ptl) {
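
The ordering claim in that comment deserves spelling out: move_huge_pmd() runs with the mmap_lock held exclusively, so no other task can acquire these two page-table locks in the opposite order, which makes nesting them deadlock-free. The shape the comment refers to is roughly the following (a sketch of the surrounding code, not part of this change):

    old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
    if (old_ptl) {
            new_ptl = pmd_lockptr(mm, new_pmd);
            if (new_ptl != old_ptl)
                    /* Safe only because the exclusive mmap_lock rules out
                     * anyone taking these two ptlocks the other way round. */
                    spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
            /* ... move the pmd entry, then drop locks in reverse order ... */
            if (new_ptl != old_ptl)
                    spin_unlock(new_ptl);
            spin_unlock(old_ptl);
    }
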
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                goto unlock;
 
        /*
-        * In case prot_numa, we are under down_read(mmap_sem). It's critical
+        * In case prot_numa, we are under mmap_read_lock(mm). It's critical
         * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-        * which is also under down_read(mmap_sem):
+        * which is also under mmap_read_lock(mm):
         *
         *      CPU0:                           CPU1:
         *                              change_huge_pmd(prot_numa=1)
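
The race diagram is cut short by the hunk context, but the scenario it describes is MADV_DONTNEED's zap path testing the pmd without the ptl, seeing it cleared, skipping it, and change_huge_pmd() then re-establishing the entry behind its back. The defence the comment motivates is to never leave the pmd none during the protection change; in the kernel of this era that takes roughly the following shape (a sketch, details elided):

    /* Swap in an invalid-but-present entry instead of clearing, so a
     * lockless pmd_none()/pmd_trans_huge() check elsewhere cannot
     * mistake an in-flight protection change for an unmapped range. */
    entry = pmdp_invalidate(vma, addr, pmd);
    entry = pmd_modify(entry, newprot);
    set_pmd_at(mm, addr, pmd, entry);
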
@@ -2618,7 +2618,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        if (PageAnon(head)) {
                /*
-                * The caller does not necessarily hold an mmap_sem that would
+                * The caller does not necessarily hold an mmap_lock that would
                 * prevent the anon_vma disappearing so we first take a
                 * reference to it and then lock the anon_vma for write. This
                 * is similar to page_lock_anon_vma_read except the write lock
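
The reference-then-lock dance the comment describes keeps the anon_vma alive across the write lock even though the caller may not hold mmap_lock. In code the shape is roughly this (a sketch following the rmap API of the era, not part of this diff):

    /* Pin the anon_vma first: without mmap_lock it could be freed
     * between the lookup and the lock acquisition. */
    anon_vma = page_get_anon_vma(head);
    if (!anon_vma) {
            ret = -EBUSY;
            goto out;
    }
    /* Taking the lock for write serialises against parallel
     * split or collapse operations. */
    anon_vma_lock_write(anon_vma);
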