BUG_ON(order == 0);
h = &hstates[hugetlb_max_hstate++];
h->order = order;
- h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+ h->mask = ~(huge_page_size(h) - 1);
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
INIT_LIST_HEAD(&h->hugepage_activelist);
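
For reference, the conversion in this hunk (and in the hunks below) is behavior-preserving because huge_page_size() is derived from the same hstate order that the open-coded expressions use. A rough sketch of the relevant helpers, paraphrased from include/linux/hugetlb.h rather than quoted verbatim:

	/* Paraphrased helpers, illustrative only -- not a verbatim quote. */
	static inline unsigned long huge_page_size(struct hstate *h)
	{
		/* PAGE_SIZE << order is the same value as 1UL << (order + PAGE_SHIFT) */
		return (unsigned long)PAGE_SIZE << h->order;
	}

	static inline unsigned int huge_page_order(struct hstate *h)
	{
		return h->order;
	}

	static inline unsigned huge_page_shift(struct hstate *h)
	{
		return h->order + PAGE_SHIFT;
	}

So ~(huge_page_size(h) - 1) produces the same mask as ~((1ULL << (order + PAGE_SHIFT)) - 1), and 1UL << huge_page_shift(hstate) is the same value as huge_page_size(hstate).
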
for_each_hstate(h) {
unsigned long count = h->nr_huge_pages;
- total += (PAGE_SIZE << huge_page_order(h)) * count;
+ total += huge_page_size(h) * count;
if (h == &default_hstate)
seq_printf(m,
h->free_huge_pages,
h->resv_huge_pages,
h->surplus_huge_pages,
- (PAGE_SIZE << huge_page_order(h)) / 1024);
+ huge_page_size(h) / SZ_1K);
}
- seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
+ seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
}
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
h->nr_huge_pages_node[nid],
h->free_huge_pages_node[nid],
h->surplus_huge_pages_node[nid],
- 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+ huge_page_size(h) / SZ_1K);
}
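
The kB conversions above are likewise equivalent: SZ_1K is 1024 (include/linux/sizes.h), so huge_page_size(h) / SZ_1K matches both (PAGE_SIZE << huge_page_order(h)) / 1024 and 1UL << (huge_page_order(h) + PAGE_SHIFT - 10). As a worked example for a typical x86-64 2 MiB hugepage (PAGE_SHIFT = 12, order = 9): huge_page_size(h) = 4096 << 9 = 2097152 bytes, and 2097152 / 1024 = 2048 kB, while the old expression gives 1UL << (9 + 12 - 10) = 1 << 11 = 2048 kB as well.
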
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
- struct hstate *hstate = hstate_vma(vma);
-
- return 1UL << huge_page_shift(hstate);
+ return huge_page_size(hstate_vma(vma));
}
/*