[linux-2.6-microblaze.git] / mm / hugetlb.c
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8a1d1f8..5b1ab1f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,21 +79,6 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-       return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, 0);
-}
-
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
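
The helpers removed here open-coded the "freed" state by writing -1UL into the private field of a tail page (head + 4). The rest of this diff switches to HPageFreed() and friends, which keep such state as flag bits in the head page's private word. A minimal userspace sketch of that bit-in-private scheme, with the HPG_* numbering assumed from the accessors used below (the real enum is defined in hugetlb.h, not in this file):

#include <assert.h>
#include <stdio.h>

/* toy stand-in for struct page; only the field this sketch needs */
struct page { unsigned long private; };

/* assumed flag numbering, mirroring the HPage* accessors used in this diff */
enum { HPG_restore_reserve, HPG_migratable, HPG_temporary, HPG_freed };

static int  HPageFreed(struct page *head)      { return !!(head->private & (1UL << HPG_freed)); }
static void SetHPageFreed(struct page *head)   { head->private |= 1UL << HPG_freed; }
static void ClearHPageFreed(struct page *head) { head->private &= ~(1UL << HPG_freed); }

int main(void)
{
        struct page head = { 0 };

        SetHPageFreed(&head);
        assert(HPageFreed(&head));
        ClearHPageFreed(&head);
        assert(!HPageFreed(&head));
        puts("HPageFreed round-trip ok");
        return 0;
}
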
@@ -346,6 +331,24 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
        }
 }
 
+static inline long
+hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
+                    long to, struct hstate *h, struct hugetlb_cgroup *cg,
+                    long *regions_needed)
+{
+       struct file_region *nrg;
+
+       if (!regions_needed) {
+               nrg = get_file_region_entry_from_cache(map, from, to);
+               record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
+               list_add(&nrg->link, rg->link.prev);
+               coalesce_file_region(map, nrg);
+       } else
+               *regions_needed += 1;
+
+       return to - from;
+}
+
 /*
  * Must be called with resv->lock held.
  *
@@ -361,7 +364,7 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
        long add = 0;
        struct list_head *head = &resv->regions;
        long last_accounted_offset = f;
-       struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
+       struct file_region *rg = NULL, *trg = NULL;
 
        if (regions_needed)
                *regions_needed = 0;
@@ -384,24 +387,17 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                /* When we find a region that starts beyond our range, we've
                 * finished.
                 */
-               if (rg->from > t)
+               if (rg->from >= t)
                        break;
 
                /* Add an entry for last_accounted_offset -> rg->from, and
                 * update last_accounted_offset.
                 */
-               if (rg->from > last_accounted_offset) {
-                       add += rg->from - last_accounted_offset;
-                       if (!regions_needed) {
-                               nrg = get_file_region_entry_from_cache(
-                                       resv, last_accounted_offset, rg->from);
-                               record_hugetlb_cgroup_uncharge_info(h_cg, h,
-                                                                   resv, nrg);
-                               list_add(&nrg->link, rg->link.prev);
-                               coalesce_file_region(resv, nrg);
-                       } else
-                               *regions_needed += 1;
-               }
+               if (rg->from > last_accounted_offset)
+                       add += hugetlb_resv_map_add(resv, rg,
+                                                   last_accounted_offset,
+                                                   rg->from, h, h_cg,
+                                                   regions_needed);
 
                last_accounted_offset = rg->to;
        }
@@ -409,17 +405,9 @@ static long add_reservation_in_range(struct resv_map *resv, long f, long t,
        /* Handle the case where our range extends beyond
         * last_accounted_offset.
         */
-       if (last_accounted_offset < t) {
-               add += t - last_accounted_offset;
-               if (!regions_needed) {
-                       nrg = get_file_region_entry_from_cache(
-                               resv, last_accounted_offset, t);
-                       record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
-                       list_add(&nrg->link, rg->link.prev);
-                       coalesce_file_region(resv, nrg);
-               } else
-                       *regions_needed += 1;
-       }
+       if (last_accounted_offset < t)
+               add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
+                                           t, h, h_cg, regions_needed);
 
        VM_BUG_ON(add < 0);
        return add;
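
add_reservation_in_range() runs in two modes: when regions_needed is non-NULL it only counts how many file_region entries a later pass will need; when it is NULL it pulls entries from the cache and links them in. The new hugetlb_resv_map_add() helper factors out the body that was duplicated for the in-loop gap and the trailing gap. A simplified userspace model of the walk (array instead of a list, no cache or cgroup bookkeeping), just to show the gap accounting:

#include <stdio.h>

struct region { long from, to; };       /* sorted, non-overlapping [from, to) */

/* count-only when regions_needed != NULL; returns pages added either way */
static long add_in_range(struct region *rg, int nr, long f, long t,
                         long *regions_needed)
{
        long add = 0, last_accounted = f;
        int i;

        if (regions_needed)
                *regions_needed = 0;

        for (i = 0; i < nr; i++) {
                if (rg[i].to <= f)
                        continue;                       /* entirely before the range */
                if (rg[i].from >= t)
                        break;                          /* starts at/after the end: done */
                if (rg[i].from > last_accounted) {      /* gap needs a new entry */
                        add += rg[i].from - last_accounted;
                        if (regions_needed)
                                *regions_needed += 1;
                }
                last_accounted = rg[i].to;
        }
        if (last_accounted < t) {                       /* trailing gap [last, t) */
                add += t - last_accounted;
                if (regions_needed)
                        *regions_needed += 1;
        }
        return add;
}

int main(void)
{
        struct region rg[] = { { 2, 4 }, { 6, 7 } };
        long needed;
        long add = add_in_range(rg, 2, 0, 10, &needed);

        /* gaps are [0,2), [4,6), [7,10): 2 + 2 + 3 = 7 pages in 3 new entries */
        printf("pages to add: %ld, entries needed: %ld\n", add, needed);
        return 0;
}
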
@@ -1053,7 +1041,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
-       SetPageHugeFreed(page);
+       SetHPageFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1070,7 +1058,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
-               ClearPageHugeFreed(page);
+               ClearHPageFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1143,7 +1131,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-               SetPagePrivate(page);
+               SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
        }
 
@@ -1364,52 +1352,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
- * to hstate->hugepage_activelist.)
- *
- * This function can be called for tail pages, but never returns true for them.
- */
-bool page_huge_active(struct page *page)
-{
-       return PageHeadHuge(page) && PagePrivate(&page[1]);
-}
-
-/* never called for tail page */
-void set_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       SetPagePrivate(&page[1]);
-}
-
-static void clear_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       ClearPagePrivate(&page[1]);
-}
-
-/*
- * Internal hugetlb specific page flag. Do not use outside of the hugetlb
- * code
- */
-static inline bool PageHugeTemporary(struct page *page)
-{
-       if (!PageHuge(page))
-               return false;
-
-       return (unsigned long)page[2].mapping == -1U;
-}
-
-static inline void SetPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = (void *)-1U;
-}
-
-static inline void ClearPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = NULL;
-}
-
 static void __free_huge_page(struct page *page)
 {
        /*
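
The page_huge_active() helpers and the PageHugeTemporary() trio deleted here each borrowed a field in a different tail page of the compound page: PagePrivate on page[1] for "active", a -1 sentinel in page[2].mapping for "temporary". HPageMigratable and HPageTemporary replace both with bits in the head page. A rough userspace sketch of the old, scattered encodings (toy struct, hypothetical helper names), only to make clear what gets consolidated:

#include <assert.h>
#include <stddef.h>

struct page {
        unsigned long private;
        void *mapping;
};

/* old scheme, simplified: one piece of state per tail page */
static void set_huge_active_old(struct page *head)    { head[1].private = 1; }
static int  huge_active_old(struct page *head)        { return head[1].private != 0; }
static void set_huge_temporary_old(struct page *head) { head[2].mapping = (void *)-1UL; }
static int  huge_temporary_old(struct page *head)     { return head[2].mapping == (void *)-1UL; }

int main(void)
{
        struct page compound[4] = { { 0, NULL } };      /* head page plus a few tails */

        set_huge_active_old(compound);
        set_huge_temporary_old(compound);
        assert(huge_active_old(compound));
        assert(huge_temporary_old(compound));
        return 0;
}
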
@@ -1418,20 +1360,19 @@ static void __free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct hugepage_subpool *spool =
-               (struct hugepage_subpool *)page_private(page);
+       struct hugepage_subpool *spool = hugetlb_page_subpool(page);
        bool restore_reserve;
 
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
 
-       set_page_private(page, 0);
+       hugetlb_set_page_subpool(page, NULL);
        page->mapping = NULL;
-       restore_reserve = PagePrivate(page);
-       ClearPagePrivate(page);
+       restore_reserve = HPageRestoreReserve(page);
+       ClearHPageRestoreReserve(page);
 
        /*
-        * If PagePrivate() was set on page, page allocation consumed a
+        * If HPageRestoreReserve was set on page, page allocation consumed a
         * reservation.  If the page was associated with a subpool, there
         * would have been a page reserved in the subpool before allocation
         * via hugepage_subpool_get_pages().  Since we are 'restoring' the
@@ -1450,7 +1391,7 @@ static void __free_huge_page(struct page *page)
        }
 
        spin_lock(&hugetlb_lock);
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1458,9 +1399,9 @@ static void __free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (PageHugeTemporary(page)) {
+       if (HPageTemporary(page)) {
                list_del(&page->lru);
-               ClearPageHugeTemporary(page);
+               ClearHPageTemporary(page);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
@@ -1527,12 +1468,13 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+       hugetlb_set_page_subpool(page, NULL);
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
-       ClearPageHugeFreed(page);
+       ClearHPageFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
@@ -1803,7 +1745,7 @@ retry:
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!PageHugeFreed(head))) {
+               if (unlikely(!HPageFreed(head))) {
                        spin_unlock(&hugetlb_lock);
                        cond_resched();
 
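
dissolve_free_huge_page() can observe a page whose hstate counters have already been bumped but which has not yet reached the free list, so it waits: drop hugetlb_lock, cond_resched(), retry until HPageFreed is seen. A small pthread sketch of that unlock/yield/retry shape, with an ordinary mutex standing in for hugetlb_lock (build with cc -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* toy stand-ins for hugetlb_lock and the HPageFreed flag */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int page_freed;

static void *enqueue_side(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        page_freed = 1;                 /* the page reaches the free list */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, enqueue_side, NULL);

        pthread_mutex_lock(&lock);
        while (!page_freed) {
                /* drop the lock, give the other side a chance, retry */
                pthread_mutex_unlock(&lock);
                sched_yield();
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("page observed on the free list; safe to dissolve");
        return 0;
}
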
@@ -1894,7 +1836,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
         * codeflow
         */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+               SetHPageTemporary(page);
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
@@ -1925,7 +1867,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       SetPageHugeTemporary(page);
+       SetHPageTemporary(page);
 
        return page;
 }
@@ -2263,24 +2205,24 @@ static long vma_add_reservation(struct hstate *h,
  * This routine is called to restore a reservation on error paths.  In the
  * specific error paths, a huge page was allocated (via alloc_huge_page)
  * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set PagePrivate
- * in the newly allocated page.  When the page is freed via free_huge_page,
- * the global reservation count will be incremented if PagePrivate is set.
- * However, free_huge_page can not adjust the reserve map.  Adjust the
- * reserve map here to be consistent with global reserve count adjustments
- * to be made by free_huge_page.
+ * alloc_huge_page would have consumed the reservation and set
+ * HPageRestoreReserve in the newly allocated page.  When the page is freed
+ * via free_huge_page, the global reservation count will be incremented if
+ * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+ * reserve map.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.
  */
 static void restore_reserve_on_error(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address,
                        struct page *page)
 {
-       if (unlikely(PagePrivate(page))) {
+       if (unlikely(HPageRestoreReserve(page))) {
                long rc = vma_needs_reservation(h, vma, address);
 
                if (unlikely(rc < 0)) {
                        /*
                         * Rare out of memory condition in reserve map
-                        * manipulation.  Clear PagePrivate so that
+                        * manipulation.  Clear HPageRestoreReserve so that
                         * global reserve count will not be incremented
                         * by free_huge_page.  This will make it appear
                         * as though the reservation for this page was
@@ -2289,7 +2231,7 @@ static void restore_reserve_on_error(struct hstate *h,
                         * is better than inconsistent global huge page
                         * accounting of reserve counts.
                         */
-                       ClearPagePrivate(page);
+                       ClearHPageRestoreReserve(page);
                } else if (rc) {
                        rc = vma_add_reservation(h, vma, address);
                        if (unlikely(rc < 0))
@@ -2297,7 +2239,7 @@ static void restore_reserve_on_error(struct hstate *h,
                                 * See above comment about rare out of
                                 * memory condition.
                                 */
-                               ClearPagePrivate(page);
+                               ClearHPageRestoreReserve(page);
                } else
                        vma_end_reservation(h, vma, address);
        }
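
The comment above describes an invariant: after an allocation error, either the reserve map gets its entry back and free_huge_page() later bumps the global reserve count (flag still set), or, if the map cannot be fixed, the flag is cleared so neither side is adjusted and the reserve simply looks consumed. A toy accounting model of those two outcomes (not the real vma_needs_reservation()/vma_add_reservation() API):

#include <stdbool.h>
#include <stdio.h>

/* toy state: global reserve counter and a one-entry reserve map */
struct state {
        long resv_huge_pages;   /* what free_huge_page adjusts */
        long map_reserved;      /* what the reserve map says */
        bool restore_reserve;   /* HPageRestoreReserve on the page */
};

/* error path: try to put the reservation back into the map */
static void restore_on_error(struct state *s, bool map_update_ok)
{
        if (!s->restore_reserve)
                return;
        if (map_update_ok)
                s->map_reserved += 1;           /* map and counter stay in sync */
        else
                s->restore_reserve = false;     /* give up: treat the reserve as consumed */
}

/* later, the free path only looks at the flag */
static void free_page_model(struct state *s)
{
        if (s->restore_reserve)
                s->resv_huge_pages += 1;
}

int main(void)
{
        /* allocation consumed one reserve: counter dropped, map entry gone */
        struct state ok  = { .resv_huge_pages = 0, .map_reserved = 0, .restore_reserve = true };
        struct state oom = ok;

        restore_on_error(&ok, true);
        free_page_model(&ok);
        restore_on_error(&oom, false);
        free_page_model(&oom);

        printf("normal error path: counter=%ld map=%ld\n", ok.resv_huge_pages, ok.map_reserved);
        printf("rare OOM in map:   counter=%ld map=%ld\n", oom.resv_huge_pages, oom.map_reserved);
        /* both end consistent: either both restored (1,1) or both consumed (0,0) */
        return 0;
}
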
@@ -2378,7 +2320,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                if (!page)
                        goto out_uncharge_cgroup;
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetPagePrivate(page);
+                       SetHPageRestoreReserve(page);
                        h->resv_huge_pages--;
                }
                spin_lock(&hugetlb_lock);
@@ -2396,7 +2338,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        spin_unlock(&hugetlb_lock);
 
-       set_page_private(page, (unsigned long)spool);
+       hugetlb_set_page_subpool(page, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
@@ -3170,6 +3112,9 @@ static int __init hugetlb_init(void)
 {
        int i;
 
+       BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
+                       __NR_HPAGEFLAGS);
+
        if (!hugepages_supported()) {
                if (hugetlb_max_hstate || default_hstate_max_huge_pages)
                        pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
@@ -3783,21 +3728,32 @@ static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
                return false;
 }
 
+static void
+hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
+                    struct page *new_page)
+{
+       __SetPageUptodate(new_page);
+       set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
+       hugepage_add_new_anon_rmap(new_page, vma, addr);
+       hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
+       ClearHPageRestoreReserve(new_page);
+       SetHPageMigratable(new_page);
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
 {
        pte_t *src_pte, *dst_pte, entry, dst_entry;
        struct page *ptepage;
        unsigned long addr;
-       int cow;
+       bool cow = is_cow_mapping(vma->vm_flags);
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
+       unsigned long npages = pages_per_huge_page(h);
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct mmu_notifier_range range;
        int ret = 0;
 
-       cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
-
        if (cow) {
                mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
                                        vma->vm_start,
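
The open-coded (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE test is replaced by is_cow_mapping(): copy-on-write applies to private (non-shared) mappings that may become writable. A tiny userspace table of the four combinations, using stand-in flag values (the real bits are in mm.h):

#include <stdbool.h>
#include <stdio.h>

/* stand-in flag bits; the real values live in include/linux/mm.h */
#define VM_SHARED   0x1ul
#define VM_MAYWRITE 0x2ul

/* same test the removed line open-coded */
static bool is_cow_mapping(unsigned long flags)
{
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
        unsigned long combos[] = { 0, VM_SHARED, VM_MAYWRITE, VM_SHARED | VM_MAYWRITE };
        int i;

        for (i = 0; i < 4; i++)
                printf("shared=%d maywrite=%d -> cow=%d\n",
                       !!(combos[i] & VM_SHARED),
                       !!(combos[i] & VM_MAYWRITE),
                       is_cow_mapping(combos[i]));
        return 0;
}
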
@@ -3842,6 +3798,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                entry = huge_ptep_get(src_pte);
                dst_entry = huge_ptep_get(dst_pte);
+again:
                if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
                        /*
                         * Skip if src entry none.  Also, skip in the
@@ -3865,6 +3822,52 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        }
                        set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
                } else {
+                       entry = huge_ptep_get(src_pte);
+                       ptepage = pte_page(entry);
+                       get_page(ptepage);
+
+                       /*
+                        * This is a rare case where we see pinned hugetlb
+                        * pages while they're prone to COW.  We need to do the
+                        * COW earlier during fork.
+                        *
+                        * When pre-allocating the page or copying data, we
+                        * need to be without the pgtable locks since we could
+                        * sleep during the process.
+                        */
+                       if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
+                               pte_t src_pte_old = entry;
+                               struct page *new;
+
+                               spin_unlock(src_ptl);
+                               spin_unlock(dst_ptl);
+                               /* Do not use reserve as it's privately owned */
+                               new = alloc_huge_page(vma, addr, 1);
+                               if (IS_ERR(new)) {
+                                       put_page(ptepage);
+                                       ret = PTR_ERR(new);
+                                       break;
+                               }
+                               copy_user_huge_page(new, ptepage, addr, vma,
+                                                   npages);
+                               put_page(ptepage);
+
+                               /* Install the new huge page if src pte stable */
+                               dst_ptl = huge_pte_lock(h, dst, dst_pte);
+                               src_ptl = huge_pte_lockptr(h, src, src_pte);
+                               spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+                               entry = huge_ptep_get(src_pte);
+                               if (!pte_same(src_pte_old, entry)) {
+                                       put_page(new);
+                                       /* dst_entry won't change as in child */
+                                       goto again;
+                               }
+                               hugetlb_install_page(vma, dst_pte, addr, new);
+                               spin_unlock(src_ptl);
+                               spin_unlock(dst_ptl);
+                               continue;
+                       }
+
                        if (cow) {
                                /*
                                 * No need to notify as we are downgrading page
@@ -3875,12 +3878,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                                 */
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        }
-                       entry = huge_ptep_get(src_pte);
-                       ptepage = pte_page(entry);
-                       get_page(ptepage);
+
                        page_dup_rmap(ptepage, true);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
-                       hugetlb_count_add(pages_per_huge_page(h), dst);
+                       hugetlb_count_add(npages, dst);
                }
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
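
The new fork-time handling for pinned hugetlb pages follows an optimistic pattern: take a reference and snapshot the source PTE, drop both page-table locks, allocate and copy a fresh huge page (either of which may sleep), then retake the locks and install the copy only if the source PTE is still the same; otherwise free the copy and jump back to "again". A single-threaded userspace sketch of that snapshot/copy/revalidate/retry shape, with toy types and a simulated racing write:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy "huge page": a buffer; toy "pte": a version counter for the source */
struct hpage { char data[16]; };

static struct hpage src = { "original" };
static unsigned long src_pte = 1;

/* something else may touch the source while we were "unlocked" */
static void maybe_modify_source(int step)
{
        if (step == 0) {        /* simulate a racing write on the first attempt */
                strcpy(src.data, "changed");
                src_pte++;
        }
}

int main(void)
{
        struct hpage *copy = NULL;
        int attempts = 0;

        for (;;) {
                unsigned long pte_old = src_pte;        /* snapshot under the "lock" */

                /* "unlock", then allocate and copy (may sleep in the kernel) */
                free(copy);
                copy = malloc(sizeof(*copy));
                if (!copy)
                        return 1;
                memcpy(copy, &src, sizeof(src));

                maybe_modify_source(attempts);

                /* "relock" and recheck: install only if the source pte is unchanged */
                if (pte_old == src_pte)
                        break;
                attempts++;                             /* pte changed: retry, like "goto again" */
        }

        printf("installed copy \"%s\" after %d retr%s\n",
               copy->data, attempts, attempts == 1 ? "y" : "ies");
        free(copy);
        return 0;
}
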
@@ -4207,7 +4208,7 @@ retry_avoidcopy:
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-               ClearPagePrivate(new_page);
+               ClearHPageRestoreReserve(new_page);
 
                /* Break COW */
                huge_ptep_clear_flush(vma, haddr, ptep);
@@ -4216,7 +4217,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
-               set_page_huge_active(new_page);
+               SetHPageMigratable(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -4274,7 +4275,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 
        if (err)
                return err;
-       ClearPagePrivate(page);
+       ClearHPageRestoreReserve(page);
 
        /*
         * set page dirty so that it will not be removed from cache/file
@@ -4436,7 +4437,7 @@ retry:
                goto backout;
 
        if (anon_rmap) {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, vma, haddr);
        } else
                page_dup_rmap(page, true);
@@ -4453,12 +4454,12 @@ retry:
        spin_unlock(ptl);
 
        /*
-        * Only make newly allocated pages active.  Existing pages found
-        * in the pagecache could be !page_huge_active() if they have been
-        * isolated for migration.
+        * Only set HPageMigratable in newly allocated pages.  Existing pages
+        * found in the pagecache may not have HPageMigratable set if they have
+        * been isolated for migration.
         */
        if (new_page)
-               set_page_huge_active(page);
+               SetHPageMigratable(page);
 
        unlock_page(page);
 out:
@@ -4750,7 +4751,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (vm_shared) {
                page_dup_rmap(page, true);
        } else {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
        }
 
@@ -4769,7 +4770,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -5074,12 +5075,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        return pages << h->order;
 }
 
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise.  */
+bool hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
                                        vm_flags_t vm_flags)
 {
-       long ret, chg, add = -1;
+       long chg, add = -1;
        struct hstate *h = hstate_inode(inode);
        struct hugepage_subpool *spool = subpool_inode(inode);
        struct resv_map *resv_map;
@@ -5089,7 +5091,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        /* This should never happen */
        if (from > to) {
                VM_WARN(1, "%s called with a negative range\n", __func__);
-               return -EINVAL;
+               return false;
        }
 
        /*
@@ -5098,7 +5100,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
-               return 0;
+               return true;
 
        /*
         * Shared mappings base their reservation on the number of pages that
@@ -5120,7 +5122,7 @@ int hugetlb_reserve_pages(struct inode *inode,
                /* Private mapping. */
                resv_map = resv_map_alloc();
                if (!resv_map)
-                       return -ENOMEM;
+                       return false;
 
                chg = to - from;
 
@@ -5128,18 +5130,12 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0) {
-               ret = chg;
+       if (chg < 0)
                goto out_err;
-       }
 
-       ret = hugetlb_cgroup_charge_cgroup_rsvd(
-               hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
-
-       if (ret < 0) {
-               ret = -ENOMEM;
+       if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+                               chg * pages_per_huge_page(h), &h_cg) < 0)
                goto out_err;
-       }
 
        if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
                /* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5154,19 +5150,15 @@ int hugetlb_reserve_pages(struct inode *inode,
         * reservations already in place (gbl_reserve).
         */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-       if (gbl_reserve < 0) {
-               ret = -ENOSPC;
+       if (gbl_reserve < 0)
                goto out_uncharge_cgroup;
-       }
 
        /*
         * Check enough hugepages are available for the reservation.
         * Hand the pages back to the subpool if there are not
         */
-       ret = hugetlb_acct_memory(h, gbl_reserve);
-       if (ret < 0) {
+       if (hugetlb_acct_memory(h, gbl_reserve) < 0)
                goto out_put_pages;
-       }
 
        /*
         * Account for the reservations made. Shared mappings record regions
@@ -5184,7 +5176,6 @@ int hugetlb_reserve_pages(struct inode *inode,
 
                if (unlikely(add < 0)) {
                        hugetlb_acct_memory(h, -gbl_reserve);
-                       ret = add;
                        goto out_put_pages;
                } else if (unlikely(chg > add)) {
                        /*
@@ -5205,7 +5196,8 @@ int hugetlb_reserve_pages(struct inode *inode,
                        hugetlb_acct_memory(h, -rsv_adjust);
                }
        }
-       return 0;
+       return true;
+
 out_put_pages:
        /* put back original number of pages, chg */
        (void)hugepage_subpool_put_pages(spool, chg);
@@ -5221,7 +5213,7 @@ out_err:
                        region_abort(resv_map, from, to, regions_needed);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
-       return ret;
+       return false;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
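
hugetlb_reserve_pages() now answers success-or-failure as a bool instead of returning -EINVAL/-ENOMEM/-ENOSPC; the distinct errno values were presumably not acted on by callers. A hypothetical sketch of the caller-side difference (not the actual hugetlbfs call sites):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the old and new interfaces */
static int reserve_pages_old(long from, long to)
{
        return from <= to ? 0 : -22;    /* 0 on success, -EINVAL style on error */
}

static bool reserve_pages_new(long from, long to)
{
        return from <= to;              /* true on success */
}

int main(void)
{
        /* old style: check for a negative errno, but never use which one it was */
        if (reserve_pages_old(0, 4) < 0)
                puts("old: reservation failed");

        /* new style: the return value is the answer itself */
        if (!reserve_pages_new(0, 4))
                puts("new: reservation failed");

        puts("both reservations succeeded");
        return 0;
}
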
@@ -5608,12 +5600,13 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
        bool ret = true;
 
        spin_lock(&hugetlb_lock);
-       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
            !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -5623,7 +5616,7 @@ unlock:
 void putback_active_hugepage(struct page *page)
 {
        spin_lock(&hugetlb_lock);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
@@ -5646,12 +5639,12 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (PageHugeTemporary(newpage)) {
+       if (HPageTemporary(newpage)) {
                int old_nid = page_to_nid(oldpage);
                int new_nid = page_to_nid(newpage);
 
-               SetPageHugeTemporary(oldpage);
-               ClearPageHugeTemporary(newpage);
+               SetHPageTemporary(oldpage);
+               ClearHPageTemporary(newpage);
 
                spin_lock(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {