/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes
                                 * both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to
                                 * satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
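
/*
 * Illustrative sketch (not part of the kernel API): totalling the huge
 * pages covered by a resv_map's region list using the [from, to)
 * convention described above. "resv" is a hypothetical resv_map
 * pointer, and resv->lock is assumed held:
 *
 *      struct file_region *rg;
 *      long pages = 0;
 *
 *      list_for_each_entry(rg, &resv->regions, link)
 *              pages += rg->to - rg->from;
 */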

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
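
/*
 * Usage sketch (illustrative only): for_each_hstate() visits every
 * registered huge page size, e.g. to total the free huge pages across
 * all pools:
 *
 *      struct hstate *h;
 *      unsigned long nr_free = 0;
 *
 *      for_each_hstate(h)
 *              nr_free += h->free_huge_pages;
 */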

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                struct vm_area_struct *vma,
                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
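
/*
 * Usage sketch (illustrative only): faults on the same (mapping, index)
 * pair are serialized by hashing into the fault mutex table and taking
 * the indexed mutex:
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... fault or reservation work for this page index ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */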

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *locked)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **pagep)
{
        BUG();
        return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If arch supports
 * hugepages at pgd level, they need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                                     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
            (huge_page_shift(h) == PUD_SHIFT) ||
            (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;

        return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
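
/*
 * Usage sketch (illustrative only): callers typically bail out early
 * when the platform has no huge page support at all, e.g.:
 *
 *      if (!hugepages_supported())
 *              return -EOPNOTSUPP;
 */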

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
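
/*
 * Usage sketch (illustrative only): the modify_prot pair brackets an
 * in-place protection change of a mapped huge PTE, in the style of
 * hugetlb_change_protection(); "newprot" is the target protection:
 *
 *      pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      pte_t new_pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */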

#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
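
/*
 * Usage sketch (illustrative only): huge_pte_lock() returns with the
 * page table lock held; the caller pairs it with spin_unlock():
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *      ... examine or update the huge PTE at ptep ...
 *      spin_unlock(ptl);
 */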

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */