hugetlb: convert page_huge_active() to HPageMigratable flag
[linux-2.6-microblaze.git] mm/hugetlb.c
index 9d3a141..727c097 100644
@@ -1143,7 +1143,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-               SetPagePrivate(page);
+               SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
        }
 
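The SetHPageRestoreReserve() call above, and the other HPage*, SetHPage* and ClearHPage* accessors used throughout this diff, are defined outside this file. A minimal sketch of how such wrappers could be generated, assuming the flags are kept as bit numbers in the head page's page->private field; only __NR_HPAGEFLAGS is actually visible in this diff (in the BUILD_BUG_ON added to hugetlb_init() below), so the HPG_* enum names and the HPAGEFLAG macro are assumptions:

/*
 * Sketch of the hugetlb-private page flag machinery assumed by the
 * callers in this diff; the HPG_* names and HPAGEFLAG macro are
 * illustrative, only __NR_HPAGEFLAGS appears in the diff itself.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	__NR_HPAGEFLAGS,
};

#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)

/* generates HPageRestoreReserve(), SetHPageRestoreReserve(), Clear... */
HPAGEFLAG(RestoreReserve, restore_reserve)
/* generates HPageMigratable(), SetHPageMigratable(), Clear... */
HPAGEFLAG(Migratable, migratable)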
@@ -1321,14 +1321,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
+       struct page *subpage = page;
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+       for (i = 0; i < pages_per_huge_page(h);
+            i++, subpage = mem_map_next(subpage, page, i)) {
+               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
@@ -1362,30 +1364,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
- * to hstate->hugepage_activelist.)
- *
- * This function can be called for tail pages, but never returns true for them.
- */
-bool page_huge_active(struct page *page)
-{
-       return PageHeadHuge(page) && PagePrivate(&page[1]);
-}
-
-/* never called for tail page */
-void set_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       SetPagePrivate(&page[1]);
-}
-
-static void clear_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       ClearPagePrivate(&page[1]);
-}
-
 /*
  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
  * code
@@ -1416,20 +1394,19 @@ static void __free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct hugepage_subpool *spool =
-               (struct hugepage_subpool *)page_private(page);
+       struct hugepage_subpool *spool = hugetlb_page_subpool(page);
        bool restore_reserve;
 
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
 
-       set_page_private(page, 0);
+       hugetlb_set_page_subpool(page, NULL);
        page->mapping = NULL;
-       restore_reserve = PagePrivate(page);
-       ClearPagePrivate(page);
+       restore_reserve = HPageRestoreReserve(page);
+       ClearHPageRestoreReserve(page);
 
        /*
-        * If PagePrivate() was set on page, page allocation consumed a
+        * If HPageRestoreReserve was set on page, page allocation consumed a
         * reservation.  If the page was associated with a subpool, there
         * would have been a page reserved in the subpool before allocation
         * via hugepage_subpool_get_pages().  Since we are 'restoring' the
@@ -1448,7 +1425,7 @@ static void __free_huge_page(struct page *page)
        }
 
        spin_lock(&hugetlb_lock);
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
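hugetlb_page_subpool() and hugetlb_set_page_subpool(), used in the hunk above, likewise come from outside this file. With the head page's private field now carrying the flag bits, the subpool pointer has to live elsewhere in the compound page; a plausible sketch, assuming it moves to the first tail page's private field:

/*
 * Sketch only: with the head page's private field reused for HPage*
 * flag bits, the subpool pointer is assumed to move to the first tail
 * page's private field.
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage + 1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					    struct hugepage_subpool *subpool)
{
	set_page_private(hpage + 1, (unsigned long)subpool);
}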
@@ -2261,24 +2238,24 @@ static long vma_add_reservation(struct hstate *h,
  * This routine is called to restore a reservation on error paths.  In the
  * specific error paths, a huge page was allocated (via alloc_huge_page)
  * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set PagePrivate
- * in the newly allocated page.  When the page is freed via free_huge_page,
- * the global reservation count will be incremented if PagePrivate is set.
- * However, free_huge_page can not adjust the reserve map.  Adjust the
- * reserve map here to be consistent with global reserve count adjustments
- * to be made by free_huge_page.
+ * alloc_huge_page would have consumed the reservation and set
+ * HPageRestoreReserve in the newly allocated page.  When the page is freed
+ * via free_huge_page, the global reservation count will be incremented if
+ * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+ * reserve map.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.
  */
 static void restore_reserve_on_error(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address,
                        struct page *page)
 {
-       if (unlikely(PagePrivate(page))) {
+       if (unlikely(HPageRestoreReserve(page))) {
                long rc = vma_needs_reservation(h, vma, address);
 
                if (unlikely(rc < 0)) {
                        /*
                         * Rare out of memory condition in reserve map
-                        * manipulation.  Clear PagePrivate so that
+                        * manipulation.  Clear HPageRestoreReserve so that
                         * global reserve count will not be incremented
                         * by free_huge_page.  This will make it appear
                         * as though the reservation for this page was
@@ -2287,7 +2264,7 @@ static void restore_reserve_on_error(struct hstate *h,
                         * is better than inconsistent global huge page
                         * accounting of reserve counts.
                         */
-                       ClearPagePrivate(page);
+                       ClearHPageRestoreReserve(page);
                } else if (rc) {
                        rc = vma_add_reservation(h, vma, address);
                        if (unlikely(rc < 0))
@@ -2295,7 +2272,7 @@ static void restore_reserve_on_error(struct hstate *h,
                                 * See above comment about rare out of
                                 * memory condition.
                                 */
-                               ClearPagePrivate(page);
+                               ClearHPageRestoreReserve(page);
                } else
                        vma_end_reservation(h, vma, address);
        }
@@ -2376,7 +2353,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                if (!page)
                        goto out_uncharge_cgroup;
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetPagePrivate(page);
+                       SetHPageRestoreReserve(page);
                        h->resv_huge_pages--;
                }
                spin_lock(&hugetlb_lock);
@@ -2394,7 +2371,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        spin_unlock(&hugetlb_lock);
 
-       set_page_private(page, (unsigned long)spool);
+       hugetlb_set_page_subpool(page, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
@@ -2527,7 +2504,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                if (hstate_is_gigantic(h)) {
                        if (hugetlb_cma_size) {
                                pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
-                               break;
+                               goto free;
                        }
                        if (!alloc_bootmem_huge_page(h))
                                break;
@@ -2545,7 +2522,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                        h->max_huge_pages, buf, i);
                h->max_huge_pages = i;
        }
-
+free:
        kfree(node_alloc_noretry);
 }
 
@@ -3168,6 +3145,9 @@ static int __init hugetlb_init(void)
 {
        int i;
 
+       BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
+                       __NR_HPAGEFLAGS);
+
        if (!hugepages_supported()) {
                if (hugetlb_max_hstate || default_hstate_max_huge_pages)
                        pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
@@ -3248,7 +3228,7 @@ void __init hugetlb_add_hstate(unsigned int order)
        BUG_ON(order == 0);
        h = &hstates[hugetlb_max_hstate++];
        h->order = order;
-       h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+       h->mask = ~(huge_page_size(h) - 1);
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
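The open-coded size arithmetic replaced in this and the following hunks all folds into huge_page_size(), whose existing definition amounts to the page size shifted by the hstate order (simplified here):

/* Existing helper the conversions rely on (simplified): */
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

/*
 * So, for example:
 *   ~((1ULL << (order + PAGE_SHIFT)) - 1)         == ~(huge_page_size(h) - 1)
 *   (PAGE_SIZE << huge_page_order(h)) / 1024      == huge_page_size(h) / SZ_1K
 *   1UL << (huge_page_order(h) + PAGE_SHIFT - 10) == huge_page_size(h) / SZ_1K
 */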
@@ -3523,7 +3503,7 @@ void hugetlb_report_meminfo(struct seq_file *m)
        for_each_hstate(h) {
                unsigned long count = h->nr_huge_pages;
 
-               total += (PAGE_SIZE << huge_page_order(h)) * count;
+               total += huge_page_size(h) * count;
 
                if (h == &default_hstate)
                        seq_printf(m,
@@ -3536,10 +3516,10 @@ void hugetlb_report_meminfo(struct seq_file *m)
                                   h->free_huge_pages,
                                   h->resv_huge_pages,
                                   h->surplus_huge_pages,
-                                  (PAGE_SIZE << huge_page_order(h)) / 1024);
+                                  huge_page_size(h) / SZ_1K);
        }
 
-       seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
+       seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
 }
 
 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
@@ -3573,7 +3553,7 @@ void hugetlb_show_meminfo(void)
                                h->nr_huge_pages_node[nid],
                                h->free_huge_pages_node[nid],
                                h->surplus_huge_pages_node[nid],
-                               1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+                               huge_page_size(h) / SZ_1K);
 }
 
 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
@@ -3696,9 +3676,7 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 
 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
 {
-       struct hstate *hstate = hstate_vma(vma);
-
-       return 1UL << huge_page_shift(hstate);
+       return huge_page_size(hstate_vma(vma));
 }
 
 /*
@@ -4207,7 +4185,7 @@ retry_avoidcopy:
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-               ClearPagePrivate(new_page);
+               ClearHPageRestoreReserve(new_page);
 
                /* Break COW */
                huge_ptep_clear_flush(vma, haddr, ptep);
@@ -4216,7 +4194,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
-               set_page_huge_active(new_page);
+               SetHPageMigratable(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -4274,7 +4252,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 
        if (err)
                return err;
-       ClearPagePrivate(page);
+       ClearHPageRestoreReserve(page);
 
        /*
         * set page dirty so that it will not be removed from cache/file
@@ -4436,7 +4414,7 @@ retry:
                goto backout;
 
        if (anon_rmap) {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, vma, haddr);
        } else
                page_dup_rmap(page, true);
@@ -4453,12 +4431,12 @@ retry:
        spin_unlock(ptl);
 
        /*
-        * Only make newly allocated pages active.  Existing pages found
-        * in the pagecache could be !page_huge_active() if they have been
-        * isolated for migration.
+        * Only set HPageMigratable in newly allocated pages.  Existing pages
+        * found in the pagecache may not have HPageMigratable set if they have
+        * been isolated for migration.
         */
        if (new_page)
-               set_page_huge_active(page);
+               SetHPageMigratable(page);
 
        unlock_page(page);
 out:
@@ -4750,7 +4728,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (vm_shared) {
                page_dup_rmap(page, true);
        } else {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
        }
 
@@ -4769,7 +4747,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -5282,7 +5260,7 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vm_flags != svm_flags ||
-           sbase < svma->vm_start || svma->vm_end < s_end)
+           !range_in_vma(svma, sbase, s_end))
                return 0;
 
        return saddr;
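range_in_vma() is the existing helper from include/linux/mm.h; the dropped open-coded bounds check is simply its negation. Roughly:

/* From include/linux/mm.h (simplified): true if [start, end] lies within vma */
static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}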
@@ -5608,12 +5586,13 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
        bool ret = true;
 
        spin_lock(&hugetlb_lock);
-       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
            !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -5622,9 +5601,8 @@ unlock:
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
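The last two hunks keep the isolate/putback contract intact for migration callers. The flow below is a hedged sketch of how a caller uses it; migrate_the_list() is a hypothetical stand-in for the actual migration step, which is not part of this diff:

LIST_HEAD(pagelist);

if (isolate_huge_page(page, &pagelist)) {
	/*
	 * HPageMigratable is now clear and the page sits on our private
	 * list, with an extra reference taken by isolate_huge_page().
	 */
	if (!migrate_the_list(&pagelist)) {	/* hypothetical migration step */
		/*
		 * On failure, put the page back on the hstate activelist,
		 * set HPageMigratable again and drop the isolation ref.
		 */
		putback_active_hugepage(page);
	}
}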